entry_point (string, 1-65 chars) | original_triton_code (string, 4.5k-619k chars) | python_code (string, 208-60.9k chars) | triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 items) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
L2N | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ex/cexoy7skdgz4pe3g6rsi3wsvi27lnn2dizv4vte5c5vkx22u7fn2.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
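# Illustrative sketch (not part of the generated module): the index math in
# triton_poi_fused_div_0 assumes a contiguous (4, 4, 4, 4) input with strides
# (64, 16, 4, 1). Flat index x3 splits into batch x2 = x3 // 64 and spatial
# slot x0 = x3 % 16, so the four loads at offsets 0, 16, 32 and 48 walk dim 1,
# the axis the L2 norm reduces over. A quick check of that decomposition:
_x = torch.arange(256).reshape(4, 4, 4, 4)
_i = 200
_b, _c, _hw = _i // 64, (_i % 64) // 16, _i % 16
assert _x.flatten()[_i] == _x[_b, _c, _hw // 4, _hw % 4]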
| import torch
from torch import nn
import torch.autograd
def l2n(x, eps=1e-06):
return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x)
class L2N(nn.Module):
def __init__(self, eps=1e-06):
super(L2N, self).__init__()
self.eps = eps
def forward(self, x):
return l2n(x, eps=self.eps)
def __repr__(self):
return self.__class__.__name__ + '(' + 'eps=' + str(self.eps) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
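# Usage sketch (illustrative): L2N normalizes along dim 1, so every
# (batch, h, w) fiber of the output has unit L2 norm up to eps.
_y = L2N()(torch.rand(4, 4, 4, 4))
assert torch.allclose(_y.norm(p=2, dim=1), torch.ones(4, 4, 4), atol=1e-4)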
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def l2n(x, eps=1e-06):
return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x)
class L2NNew(nn.Module):
def __init__(self, eps=1e-06):
super(L2NNew, self).__init__()
self.eps = eps
def __repr__(self):
return self.__class__.__name__ + '(' + 'eps=' + str(self.eps) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
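# Equivalence sketch (assumes a CUDA device is available): the compiled
# wrapper should reproduce the eager l2n above up to floating-point rounding.
if torch.cuda.is_available():
    _x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(L2NNew()(_x), l2n(_x))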
| bestfitting/instance_level_recognition | L2N | false | 14,942 | ["Apache-2.0"] | 103 | 683f021b4e65876835f028797ec28b0d1071bb45 | https://github.com/bestfitting/instance_level_recognition/tree/683f021b4e65876835f028797ec28b0d1071bb45 |
Conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6p/c6p2synorvg3uvlsfefdr424l5lgj7xyt4ykg732glc2kxjbetn6.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_per_fused__weight_norm_interface_0 = async_compile.triton('triton_per_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (12*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (12*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6x/c6xaexatzxr4vnzybtgtblji5ltakjr2daw3uf75u4az2rkqkvj3.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [1], [2], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 6) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_per_fused__weight_norm_interface_0.run(buf1, primals_2, primals_1, buf2, 4, 12, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 6), (24, 6, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_3, 96, grid=grid(96), stream=stream0)
del primals_3
return (reinterpret_tensor(buf4, (4, 4, 4), (24, 6, 1), 0), buf2, primals_1, primals_2, primals_4, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
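# Illustrative sketch (not part of the generated module): the fused kernel
# above implements the weight-norm identity w = v * (g / ||v||), where the
# norm of v is taken over dims (1, 2) of the (out_channels, in_channels, k)
# weight, matching torch.nn.utils.weight_norm with dim=0.
_v = torch.randn(4, 4, 3)
_g = torch.randn(4, 1, 1)
_w = _v * (_g / _v.pow(2).sum(dim=(1, 2), keepdim=True).sqrt())
assert _w.shape == (4, 4, 3)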
| import torch
from torch import nn
class Conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1,
causal=True):
super(Conv, self).__init__()
self.causal = causal
if self.causal:
self.padding = dilation * (kernel_size - 1)
else:
self.padding = dilation * (kernel_size - 1) // 2
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
dilation=dilation, padding=self.padding)
self.conv = nn.utils.weight_norm(self.conv)
nn.init.kaiming_normal_(self.conv.weight)
def forward(self, tensor):
out = self.conv(tensor)
if self.causal and self.padding != 0:
out = out[:, :, :-self.padding]
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
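# Padding arithmetic sketch (illustrative): with kernel_size=3 and dilation=1
# the causal branch pads both sides by 2, so a length-4 input produces a
# length-6 raw convolution output; trimming the trailing `padding` steps
# restores length 4 and keeps the output causal.
assert Conv(4, 4)(torch.rand(4, 4, 4)).shape == (4, 4, 4)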
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 12 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 12 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 6 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2,
primals_1, buf2, 4, 12, XBLOCK=1, num_warps=2, num_stages=1)
        buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1,),
            padding=(2,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 6), (24, 6, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(96)](buf4, primals_3, 96,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf4, (4, 4, 4), (24, 6, 1), 0
), buf2, primals_1, primals_2, primals_4, buf1, buf2
class ConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1,
causal=True):
super(ConvNew, self).__init__()
self.causal = causal
if self.causal:
self.padding = dilation * (kernel_size - 1)
else:
self.padding = dilation * (kernel_size - 1) // 2
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
dilation=dilation, padding=self.padding)
self.conv = nn.utils.weight_norm(self.conv)
nn.init.kaiming_normal_(self.conv.weight)
def forward(self, input_0):
primals_3 = self.conv.bias
primals_1 = self.conv.weight_g
primals_2 = self.conv.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
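# Equivalence sketch (assumes a CUDA device; weight_g/weight_v as created by
# nn.utils.weight_norm): ConvNew should match an eager weight-normed causal
# convolution built from the same parameters.
if torch.cuda.is_available():
    _m = ConvNew(4, 4).cuda()
    _x = torch.rand(4, 4, 4, device='cuda')
    with torch.no_grad():
        _v, _g = _m.conv.weight_v, _m.conv.weight_g
        _w = _v * (_g / _v.pow(2).sum(dim=(1, 2), keepdim=True).sqrt())
        _ref = torch.nn.functional.conv1d(_x, _w, _m.conv.bias, padding=2)[:, :, :-2]
        torch.testing.assert_close(_m(_x), _ref)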
| batikim09/FloWaveNet | Conv | false | 14,943 | ["MIT"] | 499 | 791f51aff530b2af4f9aa0d9fcb4af53d28a0997 | https://github.com/batikim09/FloWaveNet/tree/791f51aff530b2af4f9aa0d9fcb4af53d28a0997 |
BertAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attention_scores => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fh/cfhydrwaejluqurspx2dvattjq4qiiexqog4gfsdmfob43r5rnk5.py
# Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attention_probs => amax, exp, sub, sum_1
# attention_scores_1 => div
# attention_scores_2 => add
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_8), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_div_1 = async_compile.triton('triton_poi_fused__softmax_add_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x2), tmp19, xmask)
tl.store(out_ptr1 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/g6/cg65jkqmchbdahuikqawep5p32pz7fmy6cfvgzohspjy5l4iqhrs.py
# Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attention_probs => amax, div_1, exp, sub
# attention_scores_1 => div
# attention_scores_2 => add
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_8), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_div_2 = async_compile.triton('triton_poi_fused__softmax_add_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cg/ccgam4ht5mlftilh4n7egkpae3rmeejlbjgqfuszoxncbkog3h4g.py
# Topologically Sorted Source Nodes: [add_1, u, s, s_1, s_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# s => sub_1
# s_1 => mul
# s_2 => mean_1
# u => mean
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %sub_1), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [-1], True), kwargs = {})
triton_poi_fused_add_mean_mul_sub_4 = async_compile.triton('triton_poi_fused_add_mean_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ai/caipfk2erpwmcdcz3nupnay52ivkphhvned5iifefb24ymnvwlwk.py
# Topologically Sorted Source Nodes: [add_1, u, s, add_2, sqrt, x_3, mul_1, x_4], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# mul_1 => mul_1
# s => sub_1
# sqrt => sqrt
# u => mean
# x_3 => div_2
# x_4 => add_3
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_5 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax]
triton_poi_fused__softmax_add_div_1.run(buf5, primals_8, buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax]
triton_poi_fused__softmax_add_div_2.run(buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0)
del primals_8
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_7, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_10
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, s, s_1, s_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.mul]
triton_poi_fused_add_mean_mul_sub_4.run(buf12, primals_3, buf13, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, s, add_2, sqrt, x_3, mul_1, x_4], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_sqrt_sub_5.run(primals_11, buf12, primals_3, buf13, buf14, primals_12, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_12
return (buf15, primals_3, primals_11, buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
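# Illustrative sketch (not part of the generated module): kernels 1 and 2
# above implement a numerically stable two-pass softmax over the masked,
# scaled scores. Pass 1 writes each row's max and its sum of shifted
# exponentials; pass 2 rescales every element. The same math in eager PyTorch:
_s = torch.randn(4, 4, 4, 4)
_amax = _s.amax(dim=-1, keepdim=True)
_probs = (_s - _amax).exp() / (_s - _amax).exp().sum(dim=-1, keepdim=True)
torch.testing.assert_close(_probs, _s.softmax(dim=-1))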
| from _paritybench_helpers import _mock_config
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class BertLayerNorm(Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNorm, self).__init__()
self.shape = torch.Size((hidden_size,))
self.eps = eps
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = x - u
s = s * s
s = s.mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight * x + self.bias
return x
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 1, 3)
def transpose_key_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 3, 1)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_key_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer)
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = F.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = torch.reshape(context_layer, new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
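# Shape walkthrough (illustrative) for the config above (hidden_size=4,
# num_attention_heads=4, head_size=1): queries and values become (B, H, L, d),
# while transpose_key_for_scores lays the key out as (B, H, d, L), so the
# matmul directly yields (B, H, L, L) attention scores.
_B, _L, _H, _d = 4, 4, 4, 1
_q = torch.rand(_B, _L, _H * _d).reshape(_B, _L, _H, _d).permute(0, 2, 1, 3)
_k = torch.rand(_B, _L, _H * _d).reshape(_B, _L, _H, _d).permute(0, 2, 3, 1)
assert torch.matmul(_q, _k).shape == (_B, _H, _L, _L)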
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
import math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
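# Plain permuted copy (no bias): restores the attention-weighted values to a
# contiguous (batch, seq, hidden) layout ahead of the output projection.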
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
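# LayerNorm statistics for the residual sum in_ptr0 + in_ptr1: per-row mean
# (out_ptr0) and biased variance (out_ptr1), both divided by hidden_size = 4.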
@triton.jit
def triton_poi_fused_add_mean_mul_sub_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
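# LayerNorm affine step: weight * (x - mean) / sqrt(var + 1e-12) + bias,
# mirroring BertLayerNorm.forward defined further below.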
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
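# Pipeline: Q/K/V projections (mm), bias-add + transpose (clone kernels),
# Q @ K^T (bmm), masked softmax (two kernel passes), probs @ V (bmm),
# output projection (addmm), and finally the fused LayerNorm kernels.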
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_div_1[grid(64)](buf5, primals_8, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_add_div_2[grid(256)](buf8, primals_8,
buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_10
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_mul_sub_4[grid(16)](buf12, primals_3,
buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_5[grid(64)](primals_11,
buf12, primals_3, buf13, buf14, primals_12, buf15, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_12
return buf15, primals_3, primals_11, buf8, reinterpret_tensor(buf11, (
16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class BertLayerNorm(Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNorm, self).__init__()
self.shape = torch.Size((hidden_size,))
self.eps = eps
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = x - u
s = s * s
s = s.mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight * x + self.bias
return x
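# Sanity-check sketch (not part of the original source; the tolerance is an
# assumption): BertLayerNorm should agree with torch.nn.LayerNorm at the same
# eps, e.g.
#   ln = BertLayerNorm(4)
#   ref = nn.LayerNorm(4, eps=1e-12)
#   x = torch.randn(2, 3, 4)
#   assert torch.allclose(ln(x), ref(x), atol=1e-6)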
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 1, 3)
def transpose_key_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 3, 1)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_key_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = F.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = torch.reshape(context_layer, new_context_layer_shape)
return context_layer
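# The forward above is standard scaled dot-product attention,
# softmax(Q @ K^T / sqrt(d_head) + mask) @ V; transpose_key_for_scores
# pre-transposes K so torch.matmul needs no extra transpose.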
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
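# Usage sketch (config values copied from get_init_inputs above; CUDA is
# required by the compiled kernels; the compiled graph omits dropout, so it
# matches BertAttention in eval mode):
#   config = _mock_config(hidden_size=4, num_attention_heads=4,
#       attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)
#   m = BertAttentionNew(config).cuda()
#   out = m(torch.rand(4, 4, 4, device='cuda'),
#       torch.rand(4, 4, 4, device='cuda'))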
| axiserr/Hetu | BertAttention | false | 14,944 | ["Apache-2.0"] | 82 | 0052f727488db0570d6b37f63549b43b0920bc29 | https://github.com/axiserr/Hetu/tree/0052f727488db0570d6b37f63549b43b0920bc29
CRF | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2b/c2bkqvv3w46os3y2jfp7zebz4v4e3jusxayajeubp3hevuxzgzn6.py
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_1 => add_1
# max_1 => max_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_2, %unsqueeze_1), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1), kwargs = {})
triton_poi_fused_add_max_0 = async_compile.triton('triton_poi_fused_add_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp5, tmp11)
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp27 = tmp5 > tmp11
tmp28 = tmp5 == tmp11
tmp29 = tmp5 != tmp5
tmp30 = tmp11 != tmp11
tmp31 = tmp29 > tmp30
tmp32 = tmp27 | tmp31
tmp33 = tmp29 & tmp30
tmp34 = tmp28 | tmp33
tmp35 = tl.full([1], 0, tl.int64)
tmp36 = tl.full([1], 1, tl.int64)
tmp37 = tmp35 < tmp36
tmp38 = tmp34 & tmp37
tmp39 = tmp32 | tmp38
tmp40 = tl.where(tmp39, tmp5, tmp11)
tmp41 = tl.where(tmp39, tmp35, tmp36)
tmp42 = tmp40 > tmp18
tmp43 = tmp40 == tmp18
tmp44 = tmp40 != tmp40
tmp45 = tmp18 != tmp18
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp41 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp40, tmp18)
tmp55 = tl.where(tmp53, tmp41, tmp50)
tmp56 = tmp54 > tmp25
tmp57 = tmp54 == tmp25
tmp58 = tmp54 != tmp54
tmp59 = tmp25 != tmp25
tmp60 = tmp58 > tmp59
tmp61 = tmp56 | tmp60
tmp62 = tmp58 & tmp59
tmp63 = tmp57 | tmp62
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp55 < tmp64
tmp66 = tmp63 & tmp65
tmp67 = tmp61 | tmp66
tmp68 = tl.where(tmp67, tmp54, tmp25)
tmp69 = tl.where(tmp67, tmp55, tmp64)
tl.store(out_ptr0 + (x2), tmp26, xmask)
tl.store(out_ptr1 + (x2), tmp69, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dy/cdy673rrkjiv3qikoqrlvr3x4djhltg5txuuelkcf3bbizc3xryu.py
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_3 => add_3
# max_2 => max_2
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_3, %unsqueeze_1), kwargs = {})
# %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_3, 1), kwargs = {})
triton_poi_fused_add_max_1 = async_compile.triton('triton_poi_fused_add_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (5 + (16*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (6 + (16*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (7 + (16*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tmp64 = tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + (x2), tmp22, xmask)
tl.store(out_ptr1 + (x2), tmp65, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/y6/cy6aidx4xjpi7w6qjmhugi7jyuzpadheqvm6f3ovuoln3kiyejvo.py
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_5 => add_5
# max_3 => max_3
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_4, %unsqueeze_1), kwargs = {})
# %max_3 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_5, 1), kwargs = {})
triton_poi_fused_add_max_2 = async_compile.triton('triton_poi_fused_add_max_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (9 + (16*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (10 + (16*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (11 + (16*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tmp64 = tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + (x2), tmp22, xmask)
tl.store(out_ptr1 + (x2), tmp65, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pz/cpzuxuwhwbn22vaq3ndvkouqpztabhutjiktjp3d5vxwx3ptniiy.py
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
# Source node to ATen node mapping:
# add_7 => add_7
# max_4 => max_4
# tag_1 => gather
# tag_2 => gather_1
# tag_3 => gather_2
# v_6 => add_6
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, %select_3), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %unsqueeze_5), kwargs = {})
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_7, 1, True), kwargs = {})
# %gather : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_5, 1, %getitem_7), kwargs = {})
# %gather_1 : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_3, 1, %gather), kwargs = {})
# %gather_2 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_1, 1, %gather_1), kwargs = {})
triton_poi_fused_add_gather_max_3 = async_compile.triton('triton_poi_fused_add_gather_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i64', 4: '*i64', 5: '*i64', 6: '*i64', 7: '*i64', 8: '*i64', 9: '*i64', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gather_max_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp27 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (2))
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp47 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr2 + (3))
tmp51 = tl.broadcast_to(tmp50, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp8 + tmp10
tmp12 = tmp5 > tmp11
tmp13 = tmp5 == tmp11
tmp14 = tmp5 != tmp5
tmp15 = tmp11 != tmp11
tmp16 = tmp14 > tmp15
tmp17 = tmp12 | tmp16
tmp18 = tmp14 & tmp15
tmp19 = tmp13 | tmp18
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 1, tl.int64)
tmp22 = tmp20 < tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 | tmp23
tmp25 = tl.where(tmp24, tmp5, tmp11)
tmp26 = tl.where(tmp24, tmp20, tmp21)
tmp29 = tmp27 + tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp25 > tmp32
tmp34 = tmp25 == tmp32
tmp35 = tmp25 != tmp25
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 2, tl.int64)
tmp42 = tmp26 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp25, tmp32)
tmp46 = tl.where(tmp44, tmp26, tmp41)
tmp49 = tmp47 + tmp48
tmp52 = tmp49 + tmp51
tmp53 = tmp45 > tmp52
tmp54 = tmp45 == tmp52
tmp55 = tmp45 != tmp45
tmp56 = tmp52 != tmp52
tmp57 = tmp55 > tmp56
tmp58 = tmp53 | tmp57
tmp59 = tmp55 & tmp56
tmp60 = tmp54 | tmp59
tmp61 = tl.full([1], 3, tl.int64)
tmp62 = tmp46 < tmp61
tmp63 = tmp60 & tmp62
tmp64 = tmp58 | tmp63
tmp65 = tl.where(tmp64, tmp45, tmp52)
tmp66 = tl.where(tmp64, tmp46, tmp61)
tmp67 = tl.full([XBLOCK], 4, tl.int32)
tmp68 = tmp66 + tmp67
tmp69 = tmp66 < 0
tmp70 = tl.where(tmp69, tmp68, tmp66)
tl.device_assert(((0 <= tmp70) & (tmp70 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp70 < 4")
tmp72 = tl.load(in_ptr3 + (tmp70 + (4*x0)), xmask, eviction_policy='evict_last')
tmp73 = tmp72 + tmp67
tmp74 = tmp72 < 0
tmp75 = tl.where(tmp74, tmp73, tmp72)
tl.device_assert(((0 <= tmp75) & (tmp75 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp75 < 4")
tmp77 = tl.load(in_ptr4 + (tmp75 + (4*x0)), xmask, eviction_policy='evict_last')
tmp78 = tmp77 + tmp67
tmp79 = tmp77 < 0
tmp80 = tl.where(tmp79, tmp78, tmp77)
tl.device_assert(((0 <= tmp80) & (tmp80 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp80 < 4")
tmp82 = tl.load(in_ptr5 + (tmp80 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (4*x0), tmp66, xmask)
tl.store(out_ptr1 + (4*x0), tmp82, xmask)
tl.store(out_ptr2 + (4*x0), tmp77, xmask)
tl.store(out_ptr3 + (4*x0), tmp72, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_add_max_0.run(arg0_1, arg1_1, arg2_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_1.run(buf0, arg0_1, arg2_1, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf0; del buf0 # reuse
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_2.run(buf2, arg0_1, arg2_1, buf4, buf5, 16, grid=grid(16), stream=stream0)
del arg2_1
del buf2
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3) # alias
buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0) # alias
buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1) # alias
buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2) # alias
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
triton_poi_fused_add_gather_max_3.run(buf4, arg0_1, arg3_1, buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, grid=grid(4), stream=stream0)
del arg0_1
del arg3_1
del buf1
del buf3
del buf4
del buf5
return (buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
Essentially the difference between individual sequence scores and the
sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags - 1
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
if len(tags.shape) != 2:
raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match: {} vs {}'.format(
feats.shape[:2], tags.shape))
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags - 1
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
Computes the partition function for CRF using the forward algorithm,
i.e. the total score over all possible tag sequences for the given
feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(
self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(
self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
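# Usage sketch (shapes follow get_inputs/get_init_inputs below):
#   crf = CRF(num_tags=4)
#   feats = torch.rand(4, 4, 4)      # [batch, seq_len, num_tags]
#   best = crf(feats)                # Viterbi decode -> [4, 4] tag indices
#   tags = torch.randint(0, 4, (4, 4))
#   nll = crf.loss(feats, tags)      # scalar negative log likelihood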
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_tags': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
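# Viterbi recursion, step 1: for every current tag, takes the max over
# previous tags of (start_transitions + feats[:, 0] + transitions), storing
# the running scores (out_ptr0) and argmax backpointers (out_ptr1).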
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp5, tmp11)
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp27 = tmp5 > tmp11
tmp28 = tmp5 == tmp11
tmp29 = tmp5 != tmp5
tmp30 = tmp11 != tmp11
tmp31 = tmp29 > tmp30
tmp32 = tmp27 | tmp31
tmp33 = tmp29 & tmp30
tmp34 = tmp28 | tmp33
tmp35 = tl.full([1], 0, tl.int64)
tmp36 = tl.full([1], 1, tl.int64)
tmp37 = tmp35 < tmp36
tmp38 = tmp34 & tmp37
tmp39 = tmp32 | tmp38
tmp40 = tl.where(tmp39, tmp5, tmp11)
tmp41 = tl.where(tmp39, tmp35, tmp36)
tmp42 = tmp40 > tmp18
tmp43 = tmp40 == tmp18
tmp44 = tmp40 != tmp40
tmp45 = tmp18 != tmp18
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp41 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp40, tmp18)
tmp55 = tl.where(tmp53, tmp41, tmp50)
tmp56 = tmp54 > tmp25
tmp57 = tmp54 == tmp25
tmp58 = tmp54 != tmp54
tmp59 = tmp25 != tmp25
tmp60 = tmp58 > tmp59
tmp61 = tmp56 | tmp60
tmp62 = tmp58 & tmp59
tmp63 = tmp57 | tmp62
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp55 < tmp64
tmp66 = tmp63 & tmp65
tmp67 = tmp61 | tmp66
tl.where(tmp67, tmp54, tmp25)
tmp69 = tl.where(tmp67, tmp55, tmp64)
tl.store(out_ptr0 + x2, tmp26, xmask)
tl.store(out_ptr1 + x2, tmp69, xmask)
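# Viterbi recursion, step 2: adds feats[:, 1] to the running scores, maxes
# over previous tags through the transition matrix, and emits the updated
# scores plus the second backpointer table.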
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
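# Viterbi recursion, step 3: identical pattern using feats[:, 2].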
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
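# Final Viterbi step: adds feats[:, 3] and stop_transitions, takes the max
# over tags to pick the last tag, then follows the three backpointer tables
# (in_ptr3..in_ptr5) backwards and writes the decoded 4-step tag sequence
# into the four column slices of the output buffer.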
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp27 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr2 + 2)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp47 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp48 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr2 + 3)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp8 + tmp10
tmp12 = tmp5 > tmp11
tmp13 = tmp5 == tmp11
tmp14 = tmp5 != tmp5
tmp15 = tmp11 != tmp11
tmp16 = tmp14 > tmp15
tmp17 = tmp12 | tmp16
tmp18 = tmp14 & tmp15
tmp19 = tmp13 | tmp18
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 1, tl.int64)
tmp22 = tmp20 < tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 | tmp23
tmp25 = tl.where(tmp24, tmp5, tmp11)
tmp26 = tl.where(tmp24, tmp20, tmp21)
tmp29 = tmp27 + tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp25 > tmp32
tmp34 = tmp25 == tmp32
tmp35 = tmp25 != tmp25
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 2, tl.int64)
tmp42 = tmp26 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp25, tmp32)
tmp46 = tl.where(tmp44, tmp26, tmp41)
tmp49 = tmp47 + tmp48
tmp52 = tmp49 + tmp51
tmp53 = tmp45 > tmp52
tmp54 = tmp45 == tmp52
tmp55 = tmp45 != tmp45
tmp56 = tmp52 != tmp52
tmp57 = tmp55 > tmp56
tmp58 = tmp53 | tmp57
tmp59 = tmp55 & tmp56
tmp60 = tmp54 | tmp59
tmp61 = tl.full([1], 3, tl.int64)
tmp62 = tmp46 < tmp61
tmp63 = tmp60 & tmp62
tmp64 = tmp58 | tmp63
tl.where(tmp64, tmp45, tmp52)
tmp66 = tl.where(tmp64, tmp46, tmp61)
tmp67 = tl.full([XBLOCK], 4, tl.int32)
tmp68 = tmp66 + tmp67
tmp69 = tmp66 < 0
tmp70 = tl.where(tmp69, tmp68, tmp66)
tl.device_assert((0 <= tmp70) & (tmp70 < 4) | ~xmask,
'index out of bounds: 0 <= tmp70 < 4')
tmp72 = tl.load(in_ptr3 + (tmp70 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp73 = tmp72 + tmp67
tmp74 = tmp72 < 0
tmp75 = tl.where(tmp74, tmp73, tmp72)
tl.device_assert((0 <= tmp75) & (tmp75 < 4) | ~xmask,
'index out of bounds: 0 <= tmp75 < 4')
tmp77 = tl.load(in_ptr4 + (tmp75 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp78 = tmp77 + tmp67
tmp79 = tmp77 < 0
tmp80 = tl.where(tmp79, tmp78, tmp77)
tl.device_assert((0 <= tmp80) & (tmp80 < 4) | ~xmask,
'index out of bounds: 0 <= tmp80 < 4')
tmp82 = tl.load(in_ptr5 + (tmp80 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp66, xmask)
tl.store(out_ptr1 + 4 * x0, tmp82, xmask)
tl.store(out_ptr2 + 4 * x0, tmp77, xmask)
tl.store(out_ptr3 + 4 * x0, tmp72, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
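# Runs the four Viterbi kernels in sequence; buf6..buf9 alias the four
# columns of buf10, so the decoded tags land directly in the output tensor.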
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf0, arg0_1, arg2_1, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = buf0
del buf0
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf2, arg0_1, arg2_1, buf4,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg2_1
del buf2
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3)
buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0)
buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1)
buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf4, arg0_1, arg3_1,
buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg0_1
del arg3_1
del buf1
del buf3
del buf4
del buf5
return buf10,
class CRFNew(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRFNew, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
Essentially the difference between individual sequence scores and the
sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags - 1
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
if len(tags.shape) != 2:
raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match: {} vs {}'.format(
feats.shape[:2], tags.shape))
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags - 1
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
Computes the partition function for CRF using the forward algorithm,
i.e. the total score over all possible tag sequences for the given
feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(
self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(
self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
def forward(self, input_0):
arg2_1 = self.transitions
arg1_1 = self.start_transitions
arg3_1 = self.stop_transitions
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
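# Note: this compiled forward only performs Viterbi decoding; loss() and the
# other helpers above still run the pure-PyTorch implementations.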
| ay94/CrossNER | CRF | false | 14,945 | ["MIT"] | 77 | 2e7ba2a7798c961e3f29fbc51252c5a8d40224bf | https://github.com/ay94/CrossNER/tree/2e7ba2a7798c961e3f29fbc51252c5a8d40224bf
MuSigmaEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hh/chhqj73wauyyzrvkvg6ib7lh5drbg7bq7ddygbqpehxytxvjdtkd.py
# Topologically Sorted Source Nodes: [sigmoid, mul, sigma], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# sigma => add
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 0.9), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.1), kwargs = {})
triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.9
tmp3 = tmp1 * tmp2
tmp4 = 0.1
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, sigma], Original ATen: [aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_6, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class MuSigmaEncoder(nn.Module):
"""
Maps a representation r to mu and sigma which will define the normal
distribution from which we sample the latent variable z.
Parameters
----------
r_dim : int
Dimension of output representation r.
z_dim : int
Dimension of latent variable z.
"""
def __init__(self, r_dim, z_dim):
super(MuSigmaEncoder, self).__init__()
self.r_dim = r_dim
self.z_dim = z_dim
self.r_to_hidden = nn.Linear(r_dim, r_dim)
self.hidden_to_mu = nn.Linear(r_dim, z_dim)
self.hidden_to_sigma = nn.Linear(r_dim, z_dim)
def forward(self, r):
"""
r : torch.Tensor
Shape (batch_size, r_dim)
"""
hidden = torch.relu(self.r_to_hidden(r))
mu = self.hidden_to_mu(hidden)
sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden))
return mu, sigma
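# Hedged illustration (not part of the original source): downstream code would
# typically sample the latent z with the reparameterisation trick, so gradients
# flow through mu and sigma.
def sample_z(encoder, r):
    mu, sigma = encoder(r)
    return torch.distributions.Normal(mu, sigma).rsample()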
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'r_dim': 4, 'z_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
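    # out_ptr0 records where the ReLU clamped to zero; the fused
    # threshold_backward reuses this boolean mask during autograd.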
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.9
tmp3 = tmp1 * tmp2
tmp4 = 0.1
tmp5 = tmp3 + tmp4
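    # tmp5 = 0.1 + 0.9 * sigmoid(x): bounds sigma inside (0.1, 1.0) so the
    # predicted normal distribution never collapses to zero variance.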
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf3, primals_6, primals_4, buf5
class MuSigmaEncoderNew(nn.Module):
"""
Maps a representation r to mu and sigma which will define the normal
distribution from which we sample the latent variable z.
Parameters
----------
r_dim : int
Dimension of output representation r.
z_dim : int
Dimension of latent variable z.
"""
def __init__(self, r_dim, z_dim):
super(MuSigmaEncoderNew, self).__init__()
self.r_dim = r_dim
self.z_dim = z_dim
self.r_to_hidden = nn.Linear(r_dim, r_dim)
self.hidden_to_mu = nn.Linear(r_dim, z_dim)
self.hidden_to_sigma = nn.Linear(r_dim, z_dim)
def forward(self, input_0):
primals_1 = self.r_to_hidden.weight
primals_2 = self.r_to_hidden.bias
primals_4 = self.hidden_to_mu.weight
primals_5 = self.hidden_to_mu.bias
primals_6 = self.hidden_to_sigma.weight
primals_7 = self.hidden_to_sigma.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| benjaminalt/neural-processes | MuSigmaEncoder | false | 14,946 | [
"MIT"
]
| 170 | 03d4f921fe0598c77787eecc53cbed23e326a5f5 | https://github.com/benjaminalt/neural-processes/tree/03d4f921fe0598c77787eecc53cbed23e326a5f5 |
SigmoidFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ph/cphjj4ql64z4g7wuq3p3rhtts7pdoc44axs6iaidh56aibjgxqwt.py
# Topologically Sorted Source Nodes: [eq, float_1, neg, mul_2, p, sub, pow_1, log, term1, mul_3, ne, ge, mul_4, float_2, mul_5, pow_2, sub_1, log_1, term2, mul_6, loss, sum_1], Original ATen: [aten.eq, aten._to_copy, aten.neg, aten.mul, aten.sigmoid, aten.rsub, aten.pow, aten.log, aten.ne, aten.ge, aten.sub, aten.sum]
# Source node to ATen node mapping:
# eq => eq
# float_1 => convert_element_type_1
# float_2 => convert_element_type_2
# ge => ge
# log => log
# log_1 => log_1
# loss => sub_2
# mul_2 => mul_3
# mul_3 => mul_4
# mul_4 => mul_5
# mul_5 => mul_6
# mul_6 => mul_7
# ne => ne
# neg => neg
# p => sigmoid
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# term1 => mul_1
# term2 => mul_2
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_1, %unsqueeze), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%convert_element_type_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, 4), kwargs = {})
# %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %log), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %mul_1), kwargs = {})
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Tensor](args = (%unsqueeze_1, %unsqueeze), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%unsqueeze_1, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ne, %ge), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.float32), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, -3), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 4), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %log_1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %mul_7), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = (rindex // 256)
r5 = rindex % 64
r0 = rindex % 4
r7 = rindex % 256
r4 = rindex
tmp0 = tl.load(in_ptr0 + (r5 + (64*r3)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (r7), None, eviction_policy='evict_last')
tmp1 = 1 + r0
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = -tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp9 = tl.sigmoid(tmp8)
tmp10 = 1.0
tmp11 = tmp10 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl_math.log(tmp9)
tmp15 = tmp13 * tmp14
tmp16 = tmp7 * tmp15
tmp17 = tmp0 != tmp2
tmp18 = 0.0
tmp19 = tmp0 >= tmp18
tmp20 = tmp17 & tmp19
tmp21 = tmp20.to(tl.float32)
tmp22 = -3.0
tmp23 = tmp21 * tmp22
tmp24 = tmp9 * tmp9
tmp25 = tmp24 * tmp24
tmp26 = tl_math.log(tmp11)
tmp27 = tmp25 * tmp26
tmp28 = tmp23 * tmp27
tmp29 = tmp16 - tmp28
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp32, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [eq, float_1, neg, mul_2, p, sub, pow_1, log, term1, mul_3, ne, ge, mul_4, float_2, mul_5, pow_2, sub_1, log_1, term2, mul_6, loss, sum_1], Original ATen: [aten.eq, aten._to_copy, aten.neg, aten.mul, aten.sigmoid, aten.rsub, aten.pow, aten.log, aten.ne, aten.ge, aten.sub, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0.run(arg1_1, arg0_1, buf1, 1, 1024, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, out, target):
n_class = out.shape[1]
        class_ids = torch.arange(1, n_class + 1, dtype=target.dtype,
            device=target.device).unsqueeze(0)
t = target.unsqueeze(1)
p = torch.sigmoid(out)
gamma = self.gamma
alpha = self.alpha
term1 = (1 - p) ** gamma * torch.log(p)
term2 = p ** gamma * torch.log(1 - p)
        loss = (-(t == class_ids).float() * alpha * term1 -
            ((t != class_ids) * (t >= 0)).float() * (1 - alpha) * term2)
return loss.sum()
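# Hedged usage sketch (shapes and hyper-parameters assumed, not taken from the
# repo): flattened per-location logits against integer targets, where 0 marks
# background and 1..n_class mark positive classes.
#   logits = torch.randn(1024, 80)
#   target = torch.randint(0, 81, (1024,))
#   loss = SigmoidFocalLoss(gamma=2, alpha=0.25)(logits, target)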
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'gamma': 4, 'alpha': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0(
in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex // 256
r5 = rindex % 64
r0 = rindex % 4
r7 = rindex % 256
tmp0 = tl.load(in_ptr0 + (r5 + 64 * r3), None, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr1 + r7, None, eviction_policy='evict_last')
tmp1 = 1 + r0
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = -tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp9 = tl.sigmoid(tmp8)
tmp10 = 1.0
tmp11 = tmp10 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp12
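    # tmp13 = (1 - p)**4: gamma is fixed at 4 for these sample inputs, so the
    # power is computed by squaring twice.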
tmp14 = tl_math.log(tmp9)
tmp15 = tmp13 * tmp14
tmp16 = tmp7 * tmp15
tmp17 = tmp0 != tmp2
tmp18 = 0.0
tmp19 = tmp0 >= tmp18
tmp20 = tmp17 & tmp19
tmp21 = tmp20.to(tl.float32)
tmp22 = -3.0
tmp23 = tmp21 * tmp22
tmp24 = tmp9 * tmp9
tmp25 = tmp24 * tmp24
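    # tmp25 = p**4, the focal modulation of the negative-class term.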
tmp26 = tl_math.log(tmp11)
tmp27 = tmp25 * tmp26
tmp28 = tmp23 * tmp27
tmp29 = tmp16 - tmp28
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp32, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0[
grid(1)](arg1_1, arg0_1, buf1, 1, 1024, num_warps=8, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SigmoidFocalLossNew(nn.Module):
def __init__(self, gamma, alpha):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| berserkrambo/fcos-pytorch | SigmoidFocalLoss | false | 14,947 | [
"MIT"
]
| 63 | a064eccf6d45fc85da401151dcefe7a3b01a065b | https://github.com/berserkrambo/fcos-pytorch/tree/a064eccf6d45fc85da401151dcefe7a3b01a065b |
RNNAgent | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zb/czbrdc6746xv7kfxrqkzgbhm74ijdfuyfd3sz3llzzwzm6wzxmfi.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/x6/cx6p6kw5rikjy5fiw2bgc3fdsebg3hm7ziyw22fhoelc43uoo5ql.py
# Topologically Sorted Source Nodes: [hx], Original ATen: [aten.zeros]
# Source node to ATen node mapping:
# hx => full_default
# Graph fragment:
# %full_default : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([16, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zeros_1 = async_compile.triton('triton_poi_fused_zeros_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zeros_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12, ), (1, ))
assert_size_stride(primals_7, (12, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hx], Original ATen: [aten.zeros]
triton_poi_fused_zeros_1.run(buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf4)
del primals_5
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten._thnn_fused_gru_cell]
buf5 = torch.ops.aten._thnn_fused_gru_cell.default(buf3, buf4, buf2, primals_6, primals_7)
del buf3
del buf4
del primals_6
del primals_7
buf6 = buf5[0]
buf7 = buf5[1]
del buf5
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf6, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_9
return (reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf1, buf2, buf6, buf7, primals_8, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
import torch.nn as nn
class RNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
def init_hidden(self):
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state=None):
b, a, e = inputs.size()
x = F.relu(self.fc1(inputs.view(-1, e)), inplace=True)
if hidden_state is not None:
hidden_state = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, hidden_state)
q = self.fc2(h)
return q.view(b, a, -1), h.view(b, a, -1)
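# Hedged rollout sketch (names and sizes assumed, not from the repo): the GRU
# hidden state is threaded through successive calls.
#   agent = RNNAgent(input_shape=4, args=args)
#   h = agent.init_hidden().expand(batch * n_agents, -1)
#   for obs in episode:  # obs: [batch, n_agents, input_shape]
#       q, h = agent(obs, h)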
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_shape': 4, 'args': _mock_config(rnn_hidden_dim=4,
n_actions=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_zeros_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12,), (1,))
assert_size_stride(primals_7, (12,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_zeros_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 12), (1,
4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 12), (1,
4), 0), out=buf4)
del primals_5
buf5 = torch.ops.aten._thnn_fused_gru_cell.default(buf3, buf4, buf2,
primals_6, primals_7)
del buf3
del buf4
del primals_6
del primals_7
buf6 = buf5[0]
buf7 = buf5[1]
del buf5
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf6, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_9
return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf1, buf2, buf6, buf7, primals_8, primals_4
class RNNAgentNew(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgentNew, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
def init_hidden(self):
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.rnn.weight_ih
primals_5 = self.rnn.weight_hh
primals_6 = self.rnn.bias_ih
primals_7 = self.rnn.bias_hh
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| benellis3/pymarl2 | RNNAgent | false | 14,948 | [
"Apache-2.0"
]
| 401 | 0875995a0e0b9692ea64484478b369c7f6c0cf44 | https://github.com/benellis3/pymarl2/tree/0875995a0e0b9692ea64484478b369c7f6c0cf44 |
Masked_MSE_Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mh/cmhsawlltibjgv5uskzse4pnzcaz7l5kynoonwgf7wtqdfpp5z6q.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mse_loss]
# Source node to ATen node mapping:
# loss => pow_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
triton_poi_fused_mse_loss_0 = async_compile.triton('triton_poi_fused_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mse_loss_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mse_loss_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mse_loss]
stream0 = get_raw_stream(0)
triton_poi_fused_mse_loss_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def check_loss_input(im0, im1, w):
""" im0 is out and im1 is target and w is mask"""
assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch'
if w is not None:
assert list(im0.size())[2:] == list(w.size())[2:
], 'spatial dim mismatch'
if im1.size(0) != 1:
assert im0.size(0) == im1.size(0)
if w is not None and w.size(0) != 1:
assert im0.size(0) == w.size(0)
return
class Masked_MSE_Loss(nn.Module):
def __init__(self):
super(Masked_MSE_Loss, self).__init__()
self.loss = nn.MSELoss(reduction='none')
def forward(self, pred, ref, w=None):
""" ims have dimension BCHW while mask is B1HW """
check_loss_input(pred, ref, w)
loss = self.loss(pred, ref)
assert pred.shape[1] == ref.shape[1]
channels = pred.shape[1]
if w is not None:
w = w.repeat(1, channels, 1, 1)
n = torch.sum(loss * w, [1, 2, 3])
d = torch.sum(w, [1, 2, 3])
loss = n / d
return loss
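# Hedged usage sketch (shapes assumed): average the squared error only where
# the binary mask is 1, yielding one value per image.
#   pred, ref = torch.rand(2, 3, 8, 8), torch.rand(2, 3, 8, 8)
#   mask = (torch.rand(2, 1, 8, 8) > 0.5).float()
#   per_image = Masked_MSE_Loss()(pred, ref, mask)  # shape [2]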
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mse_loss_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mse_loss_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def check_loss_input(im0, im1, w):
""" im0 is out and im1 is target and w is mask"""
assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch'
if w is not None:
assert list(im0.size())[2:] == list(w.size())[2:
], 'spatial dim mismatch'
if im1.size(0) != 1:
assert im0.size(0) == im1.size(0)
if w is not None and w.size(0) != 1:
assert im0.size(0) == w.size(0)
return
class Masked_MSE_LossNew(nn.Module):
def __init__(self):
super(Masked_MSE_LossNew, self).__init__()
self.loss = nn.MSELoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
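# Note: this compiled wrapper only covers the w=None path; the masked
# normalisation in the eager module above is not part of the fused kernel.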
| bg459/gan-ensembling-loader | Masked_MSE_Loss | false | 14,949 | [
"MIT"
]
| 86 | 5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8 | https://github.com/bg459/gan-ensembling-loader/tree/5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8 |
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/eg/cegcfgu2nkbhmj5wg4jmovsw6ufrg35tchikhmkt324e44bhzuuw.py
# Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div]
# Source node to ATen node mapping:
# queries_2 => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_10, 1.4142135623730951), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 4)) + (16*x2) + (64*(x1 // 4))), xmask)
tmp1 = 0.7071067811865475
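    # 0.7071... = 2 ** -0.5; queries and keys each appear to be scaled by
    # head_dim ** -0.25 (head_dim = 4 here), so their dot product carries the
    # usual 1 / sqrt(head_dim) attention scaling.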
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5w/c5wgyuixdgs4q67lp3jznv3am6mg5w4566ld452z3mowdpdxq3zq.py
# Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div]
# Source node to ATen node mapping:
# keys_2 => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 1.4142135623730951), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x2 % 4)) + (16*x1) + (64*(x2 // 4))), xmask)
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dot_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
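# Numerical-stability split: this kernel computes exp(x - max(x, dim=2)) by
# re-reading the four elements of each softmax row to form the running max;
# triton_poi_fused__softmax_3 below then divides by the row sum of the
# exponentials to finish the softmax.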
# kernel path: runs/run_shard_0/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dot_1 => div_2, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4h/c4hbvy7jxbw3dzzn56ed6w3oq5x3l5zczksk2h3ncwnjhu72g2m4.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_11 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%div, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_5 = async_compile.triton('triton_poi_fused_transpose_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (64*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
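# buf11 (the [0, 2, 1] permute of the scaled queries) is materialized eagerly
# because call() returns it alongside the attention output, apparently as a
# saved tensor for the backward pass.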
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((16, 4, 4), (4, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(buf1, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf0, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [queries_2, dot], Original ATen: [aten.div, aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_5.run(buf3, buf11, 256, grid=grid(256), stream=stream0)
del buf3
return (reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), buf11, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class SelfAttention(nn.Module):
def __init__(self, input_size, heads, embed_size):
super().__init__()
self.input_size = input_size
self.heads = heads
self.emb_size = embed_size
self.tokeys = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.toqueries = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.tovalues = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
def forward(self, x):
b, t, hin = x.size()
        assert hin == self.input_size, f'Input size {hin} should match {self.input_size}'
h = self.heads
e = self.emb_size
keys = self.tokeys(x).view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x).view(b, t, h, e)
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / e ** (1 / 4)
keys = keys / e ** (1 / 4)
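        # dividing q and k each by e**(1/4) scales the q.k dot products by
        # 1/sqrt(e), the standard scaled dot-product attention normalizer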
dot = torch.bmm(queries, keys.transpose(1, 2))
assert dot.size() == (b * h, t, t)
dot = F.softmax(dot, dim=2)
out = torch.bmm(dot, values).view(b, h, t, e)
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'heads': 4, 'embed_size': 4}]
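# Minimal usage sketch (toy sizes from get_init_inputs above):
if __name__ == "__main__":
    attn = SelfAttention(input_size=4, heads=4, embed_size=4)
    x = torch.rand(4, 4, 4)  # (batch b, tokens t, features)
    out = attn(x)
    assert out.shape == (4, 4, 16)  # heads * embed_size concatenated last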
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 4) + 16 * x2 + 64 * (x1 // 4)),
xmask)
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 4) + 16 * x1 + 64 * (x2 // 4)),
xmask)
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((16, 4, 4), (4, 64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](buf1, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_div_1[grid(256)](buf0, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (16, 4, 4), (16,
1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16,
4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0)
del buf9
triton_poi_fused_transpose_5[grid(256)](buf3, buf11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf3
return reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), buf11, buf4
class SelfAttentionNew(nn.Module):
def __init__(self, input_size, heads, embed_size):
super().__init__()
self.input_size = input_size
self.heads = heads
self.emb_size = embed_size
self.tokeys = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.toqueries = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.tovalues = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
def forward(self, input_0):
primals_2 = self.tokeys.weight
primals_3 = self.toqueries.weight
primals_4 = self.tovalues.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| benellis3/pymarl2 | SelfAttention | false | 14,950 | [
"Apache-2.0"
]
| 401 | 0875995a0e0b9692ea64484478b369c7f6c0cf44 | https://github.com/benellis3/pymarl2/tree/0875995a0e0b9692ea64484478b369c7f6c0cf44 |
HeatedUpScalar | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/e4/ce4h2z2vthjeez5cqvrjtnep5b7t7jnbbufxqxbrvyhke72ruy4y.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 4.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
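# The heat-up factor is constant-folded at trace time: with first_value=4 and
# _increment=0 the kernel bakes in tmp1 = 4.0, so later on_task_end() /
# on_epoch_end() increments would not be reflected without recompilation.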
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class HeatedUpScalar(nn.Module):
def __init__(self, first_value, last_value, nb_steps, scope='task', **
kwargs):
super().__init__()
self.scope = scope
self.first_value = first_value
self.step = (max(first_value, last_value) - min(first_value,
last_value)) / (nb_steps - 1)
if first_value > last_value:
self._factor = -1
else:
self._factor = 1
self._increment = 0
def on_task_end(self):
if self.scope == 'task':
self._increment += 1
def on_epoch_end(self):
if self.scope == 'epoch':
self._increment += 1
@property
def factor(self):
return self.first_value + self._factor * self._increment * self.step
def forward(self, inputs):
return self.factor * inputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'first_value': 4, 'last_value': 4, 'nb_steps': 4}]
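# Schedule sketch with hypothetical values (get_init_inputs above uses 4/4/4,
# which keeps the factor constant at 4): first_value=2, last_value=1,
# nb_steps=3 gives step=0.5 and a factor decaying 2.0 -> 1.5 -> 1.0.
if __name__ == "__main__":
    heat = HeatedUpScalar(first_value=2, last_value=1, nb_steps=3)
    factors = []
    for _ in range(3):
        factors.append(heat.factor)
        heat.on_task_end()
    assert factors == [2.0, 1.5, 1.0]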
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HeatedUpScalarNew(nn.Module):
def __init__(self, first_value, last_value, nb_steps, scope='task', **
kwargs):
super().__init__()
self.scope = scope
self.first_value = first_value
self.step = (max(first_value, last_value) - min(first_value,
last_value)) / (nb_steps - 1)
if first_value > last_value:
self._factor = -1
else:
self._factor = 1
self._increment = 0
def on_task_end(self):
if self.scope == 'task':
self._increment += 1
def on_epoch_end(self):
if self.scope == 'epoch':
self._increment += 1
@property
def factor(self):
return self.first_value + self._factor * self._increment * self.step
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| billpsomas/incremental_learning.pytorch | HeatedUpScalar | false | 14,951 | [
"MIT"
]
| 277 | a401a6609fc61c74698739cf937c0ece1c10913f | https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f |
NacCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lz/clz2gei2r4v6zp73e5uwuxfqwprorq7wyulpi7djhsfrhn32mgcm.py
# Topologically Sorted Source Nodes: [tanh, sigmoid, W], Original ATen: [aten.tanh, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# W => mul
# sigmoid => sigmoid
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_1,), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
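# The fused kernel above materializes the effective NAC weight
# W = tanh(W_) * sigmoid(M_) in one pass over the 4x4 parameter tensors; the
# projection itself is then dispatched as a plain matmul via extern_kernels.mm.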
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, sigmoid, W], Original ATen: [aten.tanh, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0.run(primals_1, primals_2, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_uniform_
from torch.nn.functional import linear
from torch import sigmoid
from torch import tanh
class NacCell(nn.Module):
"""Basic NAC unit implementation
from https://arxiv.org/pdf/1808.00508.pdf
"""
def __init__(self, in_shape, out_shape):
"""
in_shape: input sample dimension
out_shape: output sample dimension
"""
super().__init__()
self.in_shape = in_shape
self.out_shape = out_shape
self.W_ = Parameter(Tensor(out_shape, in_shape))
self.M_ = Parameter(Tensor(out_shape, in_shape))
        xavier_uniform_(self.W_)
        xavier_uniform_(self.M_)
self.register_parameter('bias', None)
def forward(self, input):
W = tanh(self.W_) * sigmoid(self.M_)
return linear(input, W, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_shape': 4, 'out_shape': 4}]
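# Equivalence sketch: the effective weight tanh(W_) * sigmoid(M_) lies in
# (-1, 1) and saturates toward {-1, 0, 1}, biasing the cell toward pure
# addition/subtraction of inputs.
if __name__ == "__main__":
    cell = NacCell(in_shape=4, out_shape=4)
    x = torch.rand(2, 4)
    W = tanh(cell.W_) * sigmoid(cell.M_)
    assert torch.allclose(cell(x), x @ W.t())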
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import Tensor
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_uniform_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_1, primals_2,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_2, reinterpret_tensor(primals_3, (64, 4), (4,
1), 0)
class NacCellNew(nn.Module):
"""Basic NAC unit implementation
from https://arxiv.org/pdf/1808.00508.pdf
"""
def __init__(self, in_shape, out_shape):
"""
in_shape: input sample dimension
out_shape: output sample dimension
"""
super().__init__()
self.in_shape = in_shape
self.out_shape = out_shape
self.W_ = Parameter(Tensor(out_shape, in_shape))
self.M_ = Parameter(Tensor(out_shape, in_shape))
        xavier_uniform_(self.W_)
        xavier_uniform_(self.M_)
self.register_parameter('bias', None)
def forward(self, input_0):
primals_1 = self.W_
primals_2 = self.M_
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| bharathgs/NALU | NacCell | false | 14,952 | [
"MIT"
]
| 118 | 5d52cc270786563b67837a3856841baafba20e60 | https://github.com/bharathgs/NALU/tree/5d52cc270786563b67837a3856841baafba20e60 |
Masked_L1_Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/y3/cy343dpuske6hgj274rt2smutsfxv636ejjwcjx72o2ng5rtwbie.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.sub, aten.abs]
# Source node to ATen node mapping:
# loss => abs_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
triton_poi_fused_abs_sub_0 = async_compile.triton('triton_poi_fused_abs_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
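# L1Loss(reduction='none') lowers to this single pointwise |a - b| kernel; the
# masked n / d normalization in Masked_L1_Loss.forward only runs when a mask
# is passed, and get_inputs supplies none, so it is absent from this trace.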
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.sub, aten.abs]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def check_loss_input(im0, im1, w):
""" im0 is out and im1 is target and w is mask"""
assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch'
if w is not None:
assert list(im0.size())[2:] == list(w.size())[2:
], 'spatial dim mismatch'
if im1.size(0) != 1:
assert im0.size(0) == im1.size(0)
if w is not None and w.size(0) != 1:
assert im0.size(0) == w.size(0)
return
class Masked_L1_Loss(nn.Module):
def __init__(self):
super(Masked_L1_Loss, self).__init__()
self.loss = nn.L1Loss(reduction='none')
def forward(self, pred, ref, w=None):
""" ims have dimension BCHW while mask is B1HW """
check_loss_input(pred, ref, w)
loss = self.loss(pred, ref)
assert pred.shape[1] == ref.shape[1]
channels = pred.shape[1]
if w is not None:
w = w.repeat(1, channels, 1, 1)
n = torch.sum(loss * w, [1, 2, 3])
d = torch.sum(w, [1, 2, 3])
loss = n / d
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
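# Masked usage sketch: with a (B, 1, H, W) mask, forward returns one weighted
# mean of |pred - ref| per sample (n / d); without a mask it returns the
# unreduced elementwise loss, which is what the trace above compiles.
if __name__ == "__main__":
    crit = Masked_L1_Loss()
    pred, ref = torch.rand(2, 3, 4, 4), torch.rand(2, 3, 4, 4)
    w = torch.ones(2, 1, 4, 4)
    loss = crit(pred, ref, w)
    assert loss.shape == (2,)
    # an all-ones mask reduces to the plain per-sample mean
    assert torch.allclose(loss, (pred - ref).abs().mean(dim=(1, 2, 3)))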
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def check_loss_input(im0, im1, w):
""" im0 is out and im1 is target and w is mask"""
assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch'
if w is not None:
assert list(im0.size())[2:] == list(w.size())[2:
], 'spatial dim mismatch'
if im1.size(0) != 1:
assert im0.size(0) == im1.size(0)
if w is not None and w.size(0) != 1:
assert im0.size(0) == w.size(0)
return
class Masked_L1_LossNew(nn.Module):
def __init__(self):
super(Masked_L1_LossNew, self).__init__()
self.loss = nn.L1Loss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bg459/gan-ensembling-loader | Masked_L1_Loss | false | 14,953 | [
"MIT"
]
| 86 | 5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8 | https://github.com/bg459/gan-ensembling-loader/tree/5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8 |
FixupBasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pb/cpbshtrpnaucbs7hqoiqkvsndkguzk4er52bxaovgoqcnlnyqv6v.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ii/ciidyz7k62ig6agzsjgqpv736t42hwampit52unv6yf3hsf7nm72.py
# Topologically Sorted Source Nodes: [add_1, out_1, add_2], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# out_1 => relu
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_5), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp8 = tmp5 + tmp7
tmp9 = 0.0
tmp10 = tmp5 <= tmp9
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ru/cruzz4bw56op4a2umcojvqo7x3yllo524t6ildzprzx27ua6kcm5.py
# Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# mul => mul
# out_3 => add_3
# out_4 => add_4
# out_5 => relu_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %primals_7), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_mul_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_add_mul_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + (x0), xmask)
tmp3 = tmp0 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')
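# Interpretation of the three fused kernels (the bias1a/bias1b/bias2a/bias2b/
# scale names follow the Fixup paper's convention and are an assumption -- the
# trace only shows primals_2/4/5/7/8 as 1-element scalars): kernel 0 adds the
# pre-conv1 bias to the input, kernel 1 fuses post-conv1 bias + ReLU +
# pre-conv2 bias, and kernel 2 fuses scale * conv2 + final bias + identity
# shortcut (primals_1) + ReLU.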
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add_1, out_1, add_2], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf1, primals_4, primals_5, buf2, buf6, 256, grid=grid(256), stream=stream0)
del primals_4
del primals_5
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_mul_relu_threshold_backward_2.run(buf3, primals_7, primals_8, primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_8
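    # buf4 is the block's forward output; the other returned tensors are
    # weights, intermediates, and relu masks kept for the backward pass.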
return (buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = conv3x3(inplanes, planes, stride)
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = conv3x3(planes, planes)
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.relu(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
if self.downsample is not None:
identity = self.downsample(x + self.bias1a)
out += identity
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
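# Minimal usage sketch for the eager block (hypothetical driver mirroring
# get_inputs/get_init_inputs above):
def _example_fixup_block_usage():
    block = FixupBasicBlock(inplanes=4, planes=4)
    return block(torch.rand(4, 4, 4, 4))  # output shape: (4, 4, 4, 4)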
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp8 = tmp5 + tmp7
tmp9 = 0.0
tmp10 = tmp5 <= tmp9
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask)
tmp3 = tmp0 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf1,
primals_4, primals_5, buf2, buf6, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_4
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf1
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_mul_relu_threshold_backward_2[grid(256)](buf3,
primals_7, primals_8, primals_1, buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
del primals_8
return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class FixupBasicBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(FixupBasicBlockNew, self).__init__()
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = conv3x3(inplanes, planes, stride)
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = conv3x3(planes, planes)
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
self.downsample = downsample
self.stride = stride
def forward(self, input_0):
primals_2 = self.bias1a
primals_4 = self.bias1b
primals_5 = self.bias2a
primals_7 = self.scale
primals_8 = self.bias2b
primals_3 = self.conv1.weight
primals_6 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
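# Usage sketch for the compiled wrapper (hypothetical driver; `call` pins CUDA
# device 0, so module parameters and inputs must live on the GPU):
def _example_compiled_block_usage():
    block = FixupBasicBlockNew(inplanes=4, planes=4).cuda()
    return block(torch.rand(4, 4, 4, 4, device='cuda'))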
| bethgelab/robustness | FixupBasicBlock | false | 14,954 | [
"Apache-2.0"
]
| 67 | aa0a6798fe3973bae5f47561721b59b39f126ab7 | https://github.com/bethgelab/robustness/tree/aa0a6798fe3973bae5f47561721b59b39f126ab7 |
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [-1], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mk/cmkim2hc4ksxhatli3y5cu7hoqofxcbzqrrxvnlhmswdt4cgww25.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
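# The kernel above implements torch.cat([...], -1) for four width-1 chunks:
# nested tl.where selects route each per-head attention output into its own
# column of the concatenated buffer.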
# kernel path: runs/run_shard_0/inductor_cache/7f/c7fwok6q7j5rvjs3ob32s2cth5xjbedhynzb5ozchylog57bhmxv.py
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => var
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_31), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True})
triton_poi_fused_add_mean_std_3 = async_compile.triton('triton_poi_fused_add_mean_std_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + (x0), tmp29, xmask)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
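# The kernel above reduces each length-4 row of (primals_2 + buf20): the row
# mean goes to out_ptr0 and the unbiased variance (squared-deviation sum
# divided by N - 1 = 3) is written in place; the sqrt is applied in the next
# kernel.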
# kernel path: runs/run_shard_0/inductor_cache/dw/cdwd24bmovp4kvuenv3jq6ffpahgl34iziauouexc57lxivmzubp.py
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# mean => mean
# mul => mul
# std => sqrt
# sub => sub_4
# truediv_4 => div_8
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_31), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %sub_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_8, %primals_7), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_4 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
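# Eager-mode shape of the fusion above: gamma * (x - mean) / (sqrt(var) +
# 1e-06) + beta, i.e. the LayerNorm module defined later in this file applied
# to the residual sum.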
# kernel path: runs/run_shard_0/inductor_cache/va/cvayouropyisaprtjrhemadbdvsels72axdjsrgmbayknhu335yc.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_33,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dg/cdg2dxfjk7prchu44e4cgkid2y4524hl5vpyijgt6dwrnsrwzz2k.py
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_3 => add_3
# Graph fragment:
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_35), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j4/cj4wrybpym5umgwi5ropl654n64ptcknq2hunhzirmo6b5jmhqyj.py
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add_4 => add_4
# add_5 => add_5
# mean_2 => mean_1
# mul_1 => mul_1
# std_2 => sqrt_1, var_1
# sub_1 => sub_5
# truediv_5 => div_9
# Graph fragment:
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_3, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_3, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %mean_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_12, %sub_5), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-06), kwargs = {})
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_9, %primals_13), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_7 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [query], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [key], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [dot_products_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10)
buf11 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [dot_products_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14)
buf15 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [dot_products_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18)
buf19 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0)
del buf10
del buf14
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0); del buf6 # reuse
buf22 = buf21; del buf21 # reuse
buf23 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
triton_poi_fused_add_mean_std_3.run(buf22, primals_2, buf20, buf23, 16, grid=grid(16), stream=stream0)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_4.run(primals_6, primals_2, buf20, buf23, buf22, primals_7, buf24, 64, grid=grid(64), stream=stream0)
del buf22
del buf23
del primals_7
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0); del buf25 # reuse
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf26, primals_9, buf30, 64, grid=grid(64), stream=stream0)
del primals_9
buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf28, buf24, primals_11, 64, grid=grid(64), stream=stream0)
del primals_11
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_7.run(primals_12, buf28, primals_13, buf29, 64, grid=grid(64), stream=stream0)
del primals_13
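    # buf29 is the encoder layer's output; the trailing reinterpreted views of
    # buf0/buf1/buf2 let backward replay each attention head's matmuls.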
return (buf29, primals_2, primals_6, primals_12, buf5, buf9, buf13, buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(buf26, (16, 4), (4, 1), 0), buf28, primals_10, buf30, primals_8, primals_5, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
# The original code referenced INF without defining it; a large finite value
# restores the intended causal mask (math.inf would turn the zeroed lower
# triangle into NaN via 0 * inf).
INF = 1e10
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
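# Shape sketch for the helper above (hypothetical check): when the ranks
# differ by one, the smaller operand is unsqueezed so the batched product
# broadcasts, e.g. matmul(torch.rand(2, 3), torch.rand(2, 3, 4)).shape == (2, 4).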
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.linear2 = nn.Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:  # `self` can never be None here
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.cuda()  # move the causal mask alongside the keys
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(F.softmax(dot_products / self.scale,
            dim=-1)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = nn.Linear(d_key, d_key, bias=False)
self.wk = nn.Linear(d_key, d_key, bias=False)
self.wv = nn.Linear(d_value, d_value, bias=False)
self.wo = nn.Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x):
return self.layernorm(x[0] + self.dropout(self.layer(*x)))
class EncoderLayer(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
drop_ratio), d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, x):
return self.feedforward(self.selfattn(x, x, x))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_hidden': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
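# Minimal usage sketch (hypothetical driver mirroring get_inputs/
# get_init_inputs above):
def _example_encoder_layer_usage():
    layer = EncoderLayer(d_model=4, d_hidden=4, n_heads=4, drop_ratio=0.5)
    return layer(torch.rand(4, 4, 4))  # output shape: (4, 4, 4)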
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
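# [Editor's sketch] triton_poi_fused_add_div_mean_mul_std_sub_7 recomputes mean
# and unbiased std inline and applies the full LayerNorm in a single pass, with
# no intermediate mean/variance buffers:
def _ref_fused_layernorm(gamma, t, beta, eps=1e-06):
    mean = t.mean(dim=-1, keepdim=True)
    std = t.std(dim=-1, unbiased=True, keepdim=True)
    return gamma * (t - mean) / (std + eps) + beta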
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 0), out=buf6)
buf7 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 1), out=buf10)
buf11 = buf8
del buf8
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 2), out=buf14)
buf15 = buf12
del buf12
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 3), out=buf18)
buf19 = buf16
del buf16
triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del buf14
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0)
del buf6
buf22 = buf21
del buf21
buf23 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0)
del buf18
triton_poi_fused_add_mean_std_3[grid(16)](buf22, primals_2, buf20,
buf23, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_6,
primals_2, buf20, buf23, buf22, primals_7, buf24, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf22
del buf23
del primals_7
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf26,
primals_9, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0)
del buf27
triton_poi_fused_add_6[grid(64)](buf28, buf24, primals_11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_12,
buf28, primals_13, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
return (buf29, primals_2, primals_6, primals_12, buf5, buf9, buf13,
buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20,
reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(
buf26, (16, 4), (4, 1), 0), buf28, primals_10, buf30, primals_8,
primals_5, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0))
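# [Editor's note] call() returns the layer output (buf29) first; the remaining
# tensors are saved intermediates, presumably kept for reuse by autograd.
# EncoderLayerNew.forward below returns output[0] only.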
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
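# [Editor's note] Only the first two matmul branches are exercised by the
# Attention below (query/key/value are all rank-3). For the rank-mismatch case:
#     matmul(torch.rand(2, 3), torch.rand(2, 3, 5)).shape == torch.Size([2, 5])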
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.linear2 = nn.Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            # Additive causal mask: INF is assumed defined earlier in this
            # module (e.g. INF = 1e10 in the original repo).
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.to(key.device)
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(F.softmax(dot_products / self.scale,
            dim=-1)), value)
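# [Editor's sketch] Minimal usage of Attention, not from the source:
#     attn = Attention(d_key=4, drop_ratio=0.0, causal=False)
#     out = attn(q, k, v)          # q, k, v: (batch, seq, d_key)
# computes softmax(q @ k.transpose(1, 2) / sqrt(d_key)) @ v, with an
# upper-triangular -INF mask subtracted first when causal=True.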
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = nn.Linear(d_key, d_key, bias=False)
self.wk = nn.Linear(d_key, d_key, bias=False)
self.wv = nn.Linear(d_value, d_value, bias=False)
self.wo = nn.Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
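# [Editor's note] Heads are formed by chunking the projected tensors along the
# last dim (d_key // n_heads each) and running Attention per chunk; call()
# above unrolls exactly this loop into four bmm/softmax/bmm pipelines.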
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
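# [Editor's note] Unlike torch.nn.LayerNorm, this variant uses the unbiased std
# (torch.Tensor.std defaults to unbiased=True) and adds eps to the std rather
# than to the variance -- precisely what the fused kernels above reproduce
# (divide by 3.0 = N - 1, sqrt, then + 1e-06).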
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x):
return self.layernorm(x[0] + self.dropout(self.layer(*x)))
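# [Editor's note] This is the post-norm residual ordering of the original
# Transformer: LayerNorm is applied after the residual sum x[0] + dropout(...).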
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
drop_ratio), d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, input_0):
primals_1 = self.selfattn.layer.wq.weight
primals_3 = self.selfattn.layer.wk.weight
primals_4 = self.selfattn.layer.wv.weight
primals_5 = self.selfattn.layer.wo.weight
primals_6 = self.selfattn.layernorm.gamma
primals_7 = self.selfattn.layernorm.beta
primals_8 = self.feedforward.layer.linear1.weight
primals_9 = self.feedforward.layer.linear1.bias
primals_10 = self.feedforward.layer.linear2.weight
primals_11 = self.feedforward.layer.linear2.bias
primals_12 = self.feedforward.layernorm.gamma
primals_13 = self.feedforward.layernorm.beta
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
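# [Editor's sketch] Smoke test matching the shapes asserted in call() (all dims
# are 4); a CUDA device is required since the kernels are compiled for 'cuda',
# and drop_ratio=0.0 matches the traced graph (no dropout kernels in call()):
#     layer = EncoderLayerNew(d_model=4, d_hidden=4, n_heads=4, drop_ratio=0.0).cuda()
#     y = layer(torch.rand(4, 4, 4, device='cuda'))   # -> (4, 4, 4)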
| bhubanendra-mishra/dense-video-cap | EncoderLayer | false | 14,955 | ["BSD-3-Clause"] | 174 | 43914e17769701b9cf98eda203ae4c465b315fab | https://github.com/bhubanendra-mishra/dense-video-cap/tree/43914e17769701b9cf98eda203ae4c465b315fab |
UNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
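# [Editor's sketch] The convolution itself runs in an extern kernel; this Triton
# kernel only adds the channel bias peeled out of it and applies ReLU in place,
# i.e. roughly: x = torch.relu(conv_out + bias.view(1, -1, 1, 1))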
# kernel path: runs/run_shard_0/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
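# [Editor's sketch] A 2x2, stride-2 max pool that also emits the argmax offset
# (0..3) within each window as int8 -- the low-memory form of
#     out, idx = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)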
# kernel path: runs/run_shard_0/inductor_cache/bf/cbfcmpetk7os7dv3yos4vyukphcge6c34quatmmzj6ea67jnqfiz.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 48
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4o/c4o4nr2o2voxyxt3boc524hylptjse4khls4eesowiglszd6tibx.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_4 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d4/cd4s5ogbgu46xbdaa3oicwxi7l6pnddrap26pxiqzcpei77ta53h.py
# Topologically Sorted Source Nodes: [conv2d_3, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_5 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/a4/ca43wvja2n3mesrfuj54dcwx324bk23dhpnatmpi7kjryanvrx2z.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_6 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fx/cfxzrbymgfo7nihrbt7qoan4due7fobewyznrfluqb33r6nwtkby.py
# Topologically Sorted Source Nodes: [conv2d_4, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_7 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 80
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcc3bk3hxiu24duxgu3sugydvegqdajcfglm4glhv4p3tp3scoo.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_8 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 5120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kq/ckqm6pdqoqvmdcuk5rc7jmmvbkn3yhwg76hr3kl77hai4czhsdhw.py
# Topologically Sorted Source Nodes: [conv2d_5, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# x_9 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 96
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zn/cznuy5umleve2hdzkop5qtkzyqpg4rv4gn3nz2clmlho3vrxoyqt.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_11 => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_9 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_9(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
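# [Editor's sketch] This tiny kernel precomputes the nearest-neighbor source
# indices for the 2x upsample from 4x4 to 8x8:
#     (torch.arange(8) * 0.5).long()  ->  [0, 0, 1, 1, 2, 2, 3, 3]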
# kernel path: runs/run_shard_0/inductor_cache/xm/cxmqm2kzqb5gfysvuziiqsi4yzuhoxlsn6xq353smxf7vc22kkeq.py
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_12 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index, %getitem_4], 1), kwargs = {})
triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40960
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 64) % 160
x1 = (xindex // 8) % 8
x0 = xindex % 8
x3 = (xindex // 10240)
x4 = xindex % 64
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + (4*tmp9) + (16*x2) + (1536*x3)), tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp4, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 160, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tl.load(in_ptr3 + (x4 + (64*((-96) + x2)) + (4096*x3)), tmp21, other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')
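# [Editor's sketch] triton_poi_fused_cat_10 fuses bias-add + ReLU + 2x nearest
# upsample + skip concatenation in one pass, roughly
#     x = torch.cat([F.interpolate(F.relu(y + b), scale_factor=2), skip], 1)
# channels 0..95 come from the upsampled decoder branch, channels 96..159 from
# the encoder skip tensor (getitem_4); no intermediate upsample buffer exists.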
# kernel path: runs/run_shard_0/inductor_cache/jt/cjtxk56ir7fpc7mhtiibevbqe4o27gcrjdmo6ey57nq5gj7gmsdx.py
# Topologically Sorted Source Nodes: [conv2d_7, x_13], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x_13 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28672
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 112
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c4/cc4qgyndfw32ullr3zgs36muefnxaiwplmi43mfkzzxn7kpub6wu.py
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_15 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_2, mul_4, mul_5
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_12 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jf/cjfd4rrgk3ktd3jne7s74agxqw6skl2pcs6z37wbkbwzii5c7k52.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_16 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index_1, %getitem_2], 1), kwargs = {})
triton_poi_fused_cat_13 = async_compile.triton('triton_poi_fused_cat_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 163840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 256) % 160
x1 = (xindex // 16) % 16
x0 = xindex % 16
x3 = (xindex // 40960)
x4 = xindex % 256
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 112, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 8, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + (8*tmp9) + (64*x2) + (7168*x3)), tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp4, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 160, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tl.load(in_ptr3 + (x4 + (256*((-112) + x2)) + (12288*x3)), tmp21, other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/up/cupp6yfko3scwbftnvebd5rjj7jfjflsoppjnw3erfvpwqq6mzeo.py
# Topologically Sorted Source Nodes: [conv2d_9, x_17], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_9 => convolution_9
# x_17 => relu_9
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {})
triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 96
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
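# Illustrative note (not part of the generated code): the convolutions
# themselves are dispatched to cuDNN via extern_kernels.convolution with
# bias=None; pointwise kernels like the one above only fuse the bias add and
# ReLU as an in-place epilogue. A minimal eager-mode sketch of that epilogue,
# assuming an NCHW tensor and a per-channel bias:
def _bias_relu_epilogue_sketch(conv_out, bias):
    import torch
    # broadcast the per-channel bias over (N, H, W), then clamp negatives to 0
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))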
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3jbyf5ylpcnip7fl3i4e3dqhcl5pfkrdyzumgnsa2b4past5le.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_19 => add_8, add_9, convert_element_type_8, convert_element_type_9, iota_4, mul_8, mul_9
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_4, 1), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 0), kwargs = {})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_8, torch.float32), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, 0.5), kwargs = {})
# %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_9, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_15 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
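# Illustrative sketch: the kernel above precomputes the source-index table for
# a 2x nearest-neighbor upsample, out[i] = int(i * 0.5). An eager-mode
# equivalent (a sketch; the size n is a hypothetical parameter):
def _nearest_upsample_indices_sketch(n=32):
    import torch
    # truncation toward zero matches the float -> int cast in the kernel
    return (torch.arange(n, dtype=torch.float32) * 0.5).to(torch.int64)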
# kernel path: runs/run_shard_0/inductor_cache/zi/czinmdxup265556hlxnz3nrvllveifo2ih47ah4uexhqjxyyrnze.py
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_20 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index_2, %getitem], 1), kwargs = {})
triton_poi_fused_cat_16 = async_compile.triton('triton_poi_fused_cat_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 1024) % 128
x1 = (xindex // 32) % 32
x0 = xindex % 32
x3 = (xindex // 131072)
x4 = xindex % 1024
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 16, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + (16*tmp9) + (256*x2) + (24576*x3)), tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp4, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 128, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tl.load(in_ptr3 + (x4 + (1024*((-96) + x2)) + (32768*x3)), tmp21, other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')
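# Illustrative sketch of what this cat kernel (and its siblings at other
# resolutions) computes: channels below the split point are the bias-added,
# ReLU-activated decoder feature map, upsampled 2x through the index table
# above; the remaining channels are copied from the encoder skip tensor.
# A minimal eager-mode equivalent, assuming NCHW inputs:
def _fused_upsample_cat_sketch(conv_out, bias, skip):
    import torch
    import torch.nn.functional as F
    x = F.relu(conv_out + bias.view(1, -1, 1, 1))
    x = F.interpolate(x, scale_factor=2, mode='nearest')
    return torch.cat((x, skip), dim=1)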
# kernel path: runs/run_shard_0/inductor_cache/dy/cdy25x4isqnjhtxhncdnfc6lk4a7ho2n2ioqz6k646bboh6phomn.py
# Topologically Sorted Source Nodes: [conv2d_11, x_21], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# x_21 => relu_11
# Graph fragment:
# %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_11 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {})
triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lc/clcrsu5s34immb6guobkppggbuvqp4z4ceacadyjt2r2vb5cnfrr.py
# Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_23 => add_12, add_13, convert_element_type_12, convert_element_type_13, iota_6, mul_12, mul_13
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_6, 1), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, 0), kwargs = {})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_12, torch.float32), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, 0.5), kwargs = {})
# %convert_element_type_13 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_13, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_18 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gf/cgfydmvmwrzfgg5bhhersaoh3tmtp62rvrxaraj5pcsbnsdswllh.py
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_24 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index_3, %primals_3], 1), kwargs = {})
triton_poi_fused_cat_19 = async_compile.triton('triton_poi_fused_cat_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1097728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 4096) % 67
x1 = (xindex // 64) % 64
x0 = xindex % 64
x3 = (xindex // 274432)
x4 = xindex % 4096
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 32, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + (32*tmp9) + (1024*x2) + (65536*x3)), tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp4, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 67, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tl.load(in_ptr3 + (x4 + (4096*((-64) + x2)) + (12288*x3)), tmp21, other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mn/cmnzvlrns4liaacos64uxihk3ysqvk2h2olkzpckjgk6qeuui5kc.py
# Topologically Sorted Source Nodes: [conv2d_13, x_25], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# x_25 => relu_13
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {})
triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/e7/ce74uqtoket5nfthmxg424ua6qpeecce5sbwlb43qck4fh7zcxd5.py
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_27 => convolution_15
# Graph fragment:
# %convolution_15 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_21 = async_compile.triton('triton_poi_fused_convolution_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
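# Note: unlike the conv+ReLU epilogues above, this final kernel adds the bias
# without an activation; it corresponds to the unactivated dec_conv0 output in
# the eager-mode UNet module reproduced further below.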
# kernel path: runs/run_shard_0/inductor_cache/le/cleoal43esje7bfpg4n7i3co7i2wjaa2lp7tmt2bu2tggvonxs52.py
# Topologically Sorted Source Nodes: [conv2d_12, x_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# x_22 => relu_12
# Graph fragment:
# %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_12, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_22 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
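# Illustrative sketch: the *_threshold_backward_* kernels recompute the
# pre-activation (conv output + bias) and store the boolean mask
# (activation <= 0) that the backward pass uses to zero gradients through the
# ReLU. A minimal eager-mode equivalent:
def _relu_backward_mask_sketch(conv_out, bias):
    import torch
    return torch.relu(conv_out + bias.view(1, -1, 1, 1)) <= 0.0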
# kernel path: runs/run_shard_0/inductor_cache/cw/ccwvjoxicsph4brxyopxhqdg35kisxykobfkvcnqjdrmcvyb6iwt.py
# Topologically Sorted Source Nodes: [conv2d_10, x_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# x_18 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_10, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_23 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 96
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hf/chf7xvv6uah2tbybb7vzadgp55krmfafw57mems7ravvlmqiu5kh.py
# Topologically Sorted Source Nodes: [conv2d_8, x_14], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_8 => convolution_8
# x_14 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
# %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_8, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_24 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28672
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 112
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7f/c7fot224fzqg5rza4nr5ngllhbwm77mc3o7xzf5k67tsmntf2kum.py
# Topologically Sorted Source Nodes: [conv2d_6, x_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x_10 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %le_8 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_25 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_25(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 96
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
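# Note: async_compile.wait blocks until every Triton kernel defined above has
# finished compiling, so call() below can launch them without further checks.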
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (48, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (48, ), (1, ))
assert_size_stride(primals_8, (64, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (80, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (80, ), (1, ))
assert_size_stride(primals_12, (96, 80, 3, 3), (720, 9, 3, 1))
assert_size_stride(primals_13, (96, ), (1, ))
assert_size_stride(primals_14, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_15, (96, ), (1, ))
assert_size_stride(primals_16, (112, 160, 3, 3), (1440, 9, 3, 1))
assert_size_stride(primals_17, (112, ), (1, ))
assert_size_stride(primals_18, (112, 112, 3, 3), (1008, 9, 3, 1))
assert_size_stride(primals_19, (112, ), (1, ))
assert_size_stride(primals_20, (96, 160, 3, 3), (1440, 9, 3, 1))
assert_size_stride(primals_21, (96, ), (1, ))
assert_size_stride(primals_22, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_23, (96, ), (1, ))
assert_size_stride(primals_24, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_25, (64, ), (1, ))
assert_size_stride(primals_26, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_27, (64, ), (1, ))
assert_size_stride(primals_28, (64, 67, 3, 3), (603, 9, 3, 1))
assert_size_stride(primals_29, (64, ), (1, ))
assert_size_stride(primals_30, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_31, (32, ), (1, ))
assert_size_stride(primals_32, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_33, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 48, 32, 32), (49152, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 196608, grid=grid(196608), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1), torch.float32)
buf9 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf7, buf8, buf9, 49152, grid=grid(49152), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 16, 16), (16384, 256, 16, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf11, primals_9, 65536, grid=grid(65536), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.float32)
buf13 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf11, buf12, buf13, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 80, 8, 8), (5120, 64, 8, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf15, primals_11, 20480, grid=grid(20480), stream=stream0)
del primals_11
buf16 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.float32)
buf17 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf15, buf16, buf17, 5120, grid=grid(5120), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 96, 4, 4), (1536, 16, 4, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf19, primals_13, 6144, grid=grid(6144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 96, 4, 4), (1536, 16, 4, 1))
buf21 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_9.run(buf21, 8, grid=grid(8), stream=stream0)
buf22 = empty_strided_cuda((4, 160, 8, 8), (10240, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.cat]
triton_poi_fused_cat_10.run(buf21, buf20, primals_15, buf12, buf22, 40960, grid=grid(40960), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 112, 8, 8), (7168, 64, 8, 1))
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_13], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf24, primals_17, 28672, grid=grid(28672), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 112, 8, 8), (7168, 64, 8, 1))
buf26 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_12.run(buf26, 16, grid=grid(16), stream=stream0)
buf27 = empty_strided_cuda((4, 160, 16, 16), (40960, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.cat]
triton_poi_fused_cat_13.run(buf26, buf25, primals_19, buf8, buf27, 163840, grid=grid(163840), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 96, 16, 16), (24576, 256, 16, 1))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x_17], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf29, primals_21, 98304, grid=grid(98304), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 96, 16, 16), (24576, 256, 16, 1))
buf31 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_15.run(buf31, 32, grid=grid(32), stream=stream0)
buf32 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.cat]
triton_poi_fused_cat_16.run(buf31, buf30, primals_23, buf4, buf32, 524288, grid=grid(524288), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf32, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf34 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x_21], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf34, primals_25, 262144, grid=grid(262144), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf36 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_18.run(buf36, 64, grid=grid(64), stream=stream0)
buf37 = empty_strided_cuda((4, 67, 64, 64), (274432, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.cat]
triton_poi_fused_cat_19.run(buf36, buf35, primals_27, primals_3, buf37, 1097728, grid=grid(1097728), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, x_25], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_20.run(buf39, primals_29, 1048576, grid=grid(1048576), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf41 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [conv2d_14, x_26], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf41, primals_31, 524288, grid=grid(524288), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution]
triton_poi_fused_convolution_21.run(buf43, primals_33, 49152, grid=grid(49152), stream=stream0)
del primals_33
buf44 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, x_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_22.run(buf35, primals_27, buf44, 262144, grid=grid(262144), stream=stream0)
del buf35
del primals_27
buf45 = empty_strided_cuda((4, 96, 16, 16), (24576, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_10, x_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_23.run(buf30, primals_23, buf45, 98304, grid=grid(98304), stream=stream0)
del buf30
del primals_23
buf46 = empty_strided_cuda((4, 112, 8, 8), (7168, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_8, x_14], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_24.run(buf25, primals_19, buf46, 28672, grid=grid(28672), stream=stream0)
del buf25
del primals_19
buf47 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_6, x_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_25.run(buf20, primals_15, buf47, 6144, grid=grid(6144), stream=stream0)
del buf20
del primals_15
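    # buf43 (output of the final bias-add kernel) is the network output; the
    # remaining tensors below are weights, activations, pool indices, and the
    # ReLU masks computed above, saved for the backward pass.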
return (buf43, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, buf1, buf3, buf4, buf5, buf7, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf24, buf26, buf27, buf29, buf31, buf32, buf34, buf36, buf37, buf39, buf41, buf44, buf45, buf46, buf47, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((48, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((80, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((96, 80, 3, 3), (720, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((112, 160, 3, 3), (1440, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((112, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((112, 112, 3, 3), (1008, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((112, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((96, 160, 3, 3), (1440, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((96, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((64, 67, 3, 3), (603, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((3, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
def Conv(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, 3, padding=1)
def concat(a, b):
return torch.cat((a, b), 1)
def pool(x):
return F.max_pool2d(x, 2, 2)
def relu(x):
return F.relu(x, inplace=True)
def upsample(x):
return F.interpolate(x, scale_factor=2, mode='nearest')
class UNet(nn.Module):
def __init__(self, in_channels=3, out_channels=3):
super(UNet, self).__init__()
ic = in_channels
ec1 = 32
ec2 = 48
ec3 = 64
ec4 = 80
ec5 = 96
dc4 = 112
dc3 = 96
dc2 = 64
dc1a = 64
dc1b = 32
oc = out_channels
self.enc_conv0 = Conv(ic, ec1)
self.enc_conv1 = Conv(ec1, ec1)
self.enc_conv2 = Conv(ec1, ec2)
self.enc_conv3 = Conv(ec2, ec3)
self.enc_conv4 = Conv(ec3, ec4)
self.enc_conv5a = Conv(ec4, ec5)
self.enc_conv5b = Conv(ec5, ec5)
self.dec_conv4a = Conv(ec5 + ec3, dc4)
self.dec_conv4b = Conv(dc4, dc4)
self.dec_conv3a = Conv(dc4 + ec2, dc3)
self.dec_conv3b = Conv(dc3, dc3)
self.dec_conv2a = Conv(dc3 + ec1, dc2)
self.dec_conv2b = Conv(dc2, dc2)
self.dec_conv1a = Conv(dc2 + ic, dc1a)
self.dec_conv1b = Conv(dc1a, dc1b)
self.dec_conv0 = Conv(dc1b, oc)
self.alignment = 16
def forward(self, input):
x = relu(self.enc_conv0(input))
x = relu(self.enc_conv1(x))
x = pool1 = pool(x)
x = relu(self.enc_conv2(x))
x = pool2 = pool(x)
x = relu(self.enc_conv3(x))
x = pool3 = pool(x)
x = relu(self.enc_conv4(x))
x = pool(x)
x = relu(self.enc_conv5a(x))
x = relu(self.enc_conv5b(x))
x = upsample(x)
x = concat(x, pool3)
x = relu(self.dec_conv4a(x))
x = relu(self.dec_conv4b(x))
x = upsample(x)
x = concat(x, pool2)
x = relu(self.dec_conv3a(x))
x = relu(self.dec_conv3b(x))
x = upsample(x)
x = concat(x, pool1)
x = relu(self.dec_conv2a(x))
x = relu(self.dec_conv2b(x))
x = upsample(x)
x = concat(x, input)
x = relu(self.dec_conv1a(x))
x = relu(self.dec_conv1b(x))
x = self.dec_conv0(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
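# Minimal usage sketch for the eager-mode module above (illustrative; mirrors
# get_inputs):
#   unet = UNet()
#   out = unet(torch.rand(4, 3, 64, 64))  # -> (4, 3, 64, 64)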
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 48
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 80
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 96
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
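# Builds the nearest-neighbor source-index table for 2x upsampling of the
# 4x4 bottleneck to 8x8: src = int(dst * 0.5) for dst in 0..7.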
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_9(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
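# Fuses the first skip connection in one pass: bias-add + ReLU on the
# 96-channel 4x4 bottleneck conv, 2x nearest upsample to 8x8 via the index
# table above, then channel concat with the pooled 64-channel skip
# (96 + 64 = 160 channels out). cat_13/cat_16/cat_19 repeat the pattern at
# 16x16, 32x32 and 64x64.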
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 64 % 160
x1 = xindex // 8 % 8
x0 = xindex % 8
x3 = xindex // 10240
x4 = xindex % 64
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 4 * tmp9 + 16 * x2 + 1536 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 160, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 64 * (-96 + x2) + 4096 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 112
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 256 % 160
x1 = xindex // 16 % 16
x0 = xindex % 16
x3 = xindex // 40960
x4 = xindex % 256
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 112, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 8, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 8 * tmp9 + 64 * x2 + 7168 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 160, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 256 * (-112 + x2) + 12288 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 96
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 1024 % 128
x1 = xindex // 32 % 32
x0 = xindex % 32
x3 = xindex // 131072
x4 = xindex % 1024
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 16, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 16 * tmp9 + 256 * x2 + 24576 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 1024 * (-96 + x2) + 32768 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 4096 % 67
x1 = xindex // 64 % 64
x0 = xindex % 64
x3 = xindex // 274432
x4 = xindex % 4096
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 32, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 32 * tmp9 + 1024 * x2 + 65536 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 67, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 4096 * (-64 + x2) + 12288 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
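# The *_threshold_backward kernels recompute bias-add + ReLU for decoder
# convs whose activations only ever lived inside the fused cat outputs, and
# store the boolean mask (activation <= 0) that autograd needs for ReLU
# backward.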
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 96
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 112
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_25(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 96
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
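# call() drives the whole graph: convolutions run through
# extern_kernels.convolution (bias=None, folded into the fused Triton
# epilogues above), and the return tuple is the network output (buf43)
# followed by every tensor saved for the backward pass.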
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32, primals_33
) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (48, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (48,), (1,))
assert_size_stride(primals_8, (64, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (80, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (80,), (1,))
assert_size_stride(primals_12, (96, 80, 3, 3), (720, 9, 3, 1))
assert_size_stride(primals_13, (96,), (1,))
assert_size_stride(primals_14, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_15, (96,), (1,))
assert_size_stride(primals_16, (112, 160, 3, 3), (1440, 9, 3, 1))
assert_size_stride(primals_17, (112,), (1,))
assert_size_stride(primals_18, (112, 112, 3, 3), (1008, 9, 3, 1))
assert_size_stride(primals_19, (112,), (1,))
assert_size_stride(primals_20, (96, 160, 3, 3), (1440, 9, 3, 1))
assert_size_stride(primals_21, (96,), (1,))
assert_size_stride(primals_22, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_23, (96,), (1,))
assert_size_stride(primals_24, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_25, (64,), (1,))
assert_size_stride(primals_26, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_27, (64,), (1,))
assert_size_stride(primals_28, (64, 67, 3, 3), (603, 9, 3, 1))
assert_size_stride(primals_29, (64,), (1,))
assert_size_stride(primals_30, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_31, (32,), (1,))
assert_size_stride(primals_32, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_33, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf3, buf4,
buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 48, 32, 32), (49152, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(196608)](buf7, primals_7,
196608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1),
torch.float32)
buf9 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(49152)](buf7, buf8,
buf9, 49152, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 16, 16), (16384, 256, 16, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_4[grid(65536)](buf11, primals_9,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
float32)
buf13 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf11,
buf12, buf13, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 80, 8, 8), (5120, 64, 8, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_6[grid(20480)](buf15, primals_11,
20480, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf16 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.
float32)
buf17 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(5120)](buf15, buf16,
buf17, 5120, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 96, 4, 4), (1536, 16, 4, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_8[grid(6144)](buf19, primals_13,
6144, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf20 = extern_kernels.convolution(buf19, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 96, 4, 4), (1536, 16, 4, 1))
buf21 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_9[grid(8)](buf21, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 160, 8, 8), (10240, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_10[grid(40960)](buf21, buf20, primals_15,
buf12, buf22, 40960, XBLOCK=256, num_warps=4, num_stages=1)
buf23 = extern_kernels.convolution(buf22, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 112, 8, 8), (7168, 64, 8, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_11[grid(28672)](buf24, primals_17,
28672, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf25 = extern_kernels.convolution(buf24, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 112, 8, 8), (7168, 64, 8, 1))
buf26 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_12[grid(16)](buf26, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((4, 160, 16, 16), (40960, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_13[grid(163840)](buf26, buf25, primals_19,
buf8, buf27, 163840, XBLOCK=512, num_warps=8, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 96, 16, 16), (24576, 256, 16, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_14[grid(98304)](buf29, primals_21,
98304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf30 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 96, 16, 16), (24576, 256, 16, 1))
buf31 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_15[grid(32)](buf31, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_16[grid(524288)](buf31, buf30, primals_23,
buf4, buf32, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf33 = extern_kernels.convolution(buf32, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf34 = buf33
del buf33
triton_poi_fused_convolution_relu_17[grid(262144)](buf34,
primals_25, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_25
buf35 = extern_kernels.convolution(buf34, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf36 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_18[grid(64)](buf36, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf37 = empty_strided_cuda((4, 67, 64, 64), (274432, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_19[grid(1097728)](buf36, buf35, primals_27,
primals_3, buf37, 1097728, XBLOCK=1024, num_warps=4, num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_20[grid(1048576)](buf39,
primals_29, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_29
buf40 = extern_kernels.convolution(buf39, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_0[grid(524288)](buf41, primals_31,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf42 = extern_kernels.convolution(buf41, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_21[grid(49152)](buf43, primals_33,
49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_33
buf44 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_22[grid(262144)](
buf35, primals_27, buf44, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf35
del primals_27
buf45 = empty_strided_cuda((4, 96, 16, 16), (24576, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_23[grid(98304)](
buf30, primals_23, buf45, 98304, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf30
del primals_23
buf46 = empty_strided_cuda((4, 112, 8, 8), (7168, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_24[grid(28672)](
buf25, primals_19, buf46, 28672, XBLOCK=128, num_warps=4,
num_stages=1)
del buf25
del primals_19
buf47 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_25[grid(6144)](
buf20, primals_15, buf47, 6144, XBLOCK=256, num_warps=4,
num_stages=1)
del buf20
del primals_15
return (buf43, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, buf1, buf3, buf4, buf5, buf7, buf8, buf9,
buf11, buf12, buf13, buf15, buf16, buf17, buf19, buf21, buf22,
buf24, buf26, buf27, buf29, buf31, buf32, buf34, buf36, buf37,
buf39, buf41, buf44, buf45, buf46, buf47)
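# The helpers below mirror the source repo's definitions so UNetNew keeps
# the original module structure while forward() routes through call().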
def Conv(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, 3, padding=1)
def concat(a, b):
return torch.cat((a, b), 1)
def pool(x):
return F.max_pool2d(x, 2, 2)
def relu(x):
return F.relu(x, inplace=True)
def upsample(x):
return F.interpolate(x, scale_factor=2, mode='nearest')
class UNetNew(nn.Module):
def __init__(self, in_channels=3, out_channels=3):
super(UNetNew, self).__init__()
ic = in_channels
ec1 = 32
ec2 = 48
ec3 = 64
ec4 = 80
ec5 = 96
dc4 = 112
dc3 = 96
dc2 = 64
dc1a = 64
dc1b = 32
oc = out_channels
self.enc_conv0 = Conv(ic, ec1)
self.enc_conv1 = Conv(ec1, ec1)
self.enc_conv2 = Conv(ec1, ec2)
self.enc_conv3 = Conv(ec2, ec3)
self.enc_conv4 = Conv(ec3, ec4)
self.enc_conv5a = Conv(ec4, ec5)
self.enc_conv5b = Conv(ec5, ec5)
self.dec_conv4a = Conv(ec5 + ec3, dc4)
self.dec_conv4b = Conv(dc4, dc4)
self.dec_conv3a = Conv(dc4 + ec2, dc3)
self.dec_conv3b = Conv(dc3, dc3)
self.dec_conv2a = Conv(dc3 + ec1, dc2)
self.dec_conv2b = Conv(dc2, dc2)
self.dec_conv1a = Conv(dc2 + ic, dc1a)
self.dec_conv1b = Conv(dc1a, dc1b)
self.dec_conv0 = Conv(dc1b, oc)
self.alignment = 16
def forward(self, input_0):
primals_1 = self.enc_conv0.weight
primals_2 = self.enc_conv0.bias
primals_4 = self.enc_conv1.weight
primals_5 = self.enc_conv1.bias
primals_6 = self.enc_conv2.weight
primals_7 = self.enc_conv2.bias
primals_8 = self.enc_conv3.weight
primals_9 = self.enc_conv3.bias
primals_10 = self.enc_conv4.weight
primals_11 = self.enc_conv4.bias
primals_12 = self.enc_conv5a.weight
primals_13 = self.enc_conv5a.bias
primals_14 = self.enc_conv5b.weight
primals_15 = self.enc_conv5b.bias
primals_16 = self.dec_conv4a.weight
primals_17 = self.dec_conv4a.bias
primals_18 = self.dec_conv4b.weight
primals_19 = self.dec_conv4b.bias
primals_20 = self.dec_conv3a.weight
primals_21 = self.dec_conv3a.bias
primals_22 = self.dec_conv3b.weight
primals_23 = self.dec_conv3b.bias
primals_24 = self.dec_conv2a.weight
primals_25 = self.dec_conv2a.bias
primals_26 = self.dec_conv2b.weight
primals_27 = self.dec_conv2b.bias
primals_28 = self.dec_conv1a.weight
primals_29 = self.dec_conv1a.bias
primals_30 = self.dec_conv1b.weight
primals_31 = self.dec_conv1b.bias
primals_32 = self.dec_conv0.weight
primals_33 = self.dec_conv0.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33])
return output[0]
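# Hedged usage sketch (assumes a CUDA device; shapes follow get_inputs):
#   net = UNetNew().cuda()
#   out = net(torch.rand([4, 3, 64, 64], device='cuda'))  # -> (4, 3, 64, 64)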
| arpan-dhatt/oidn | UNet | false | 14,956 | ["Apache-2.0"] | 1,206 | 9419411ba4b343b475b53587cadd44c83d68dc2a | https://github.com/arpan-dhatt/oidn/tree/9419411ba4b343b475b53587cadd44c83d68dc2a |
SparsemaxBisect | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
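# The single fused kernel below unrolls 17 bisection steps of the sparsemax
# threshold search. A minimal eager-mode reference of the same algorithm
# (an illustration for readers, not part of the generated graph):
def _sparsemax_bisect_ref(x, n_iter=17):
    # Find tau such that sum(clamp(x - tau, 0)) == 1, then project.
    d = x.shape[-1]
    max_val = x.max(dim=-1, keepdim=True).values
    tau_lo = max_val - 1.0       # f(tau_lo) >= 0
    tau_hi = max_val - 1.0 / d   # f(tau_hi) <= 0
    f_lo = (x - tau_lo).clamp(min=0).sum(-1) - 1.0
    dm = tau_hi - tau_lo
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        f_m = (x - tau_m).clamp(min=0).sum(-1) - 1.0
        # advance tau_lo wherever f_m has the same sign as f_lo,
        # i.e. the root still lies in [tau_m, tau_hi]
        tau_lo = torch.where((f_m * f_lo >= 0).unsqueeze(-1), tau_m, tau_lo)
    return (x - tau_lo).clamp(min=0)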
# kernel path: runs/run_shard_0/inductor_cache/no/cnoevxmi56p67zph5bcvud6uollxe7h3vjriggenmd3qdtjq74rl.py
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, tau_m, sub_6, p_m, sum_2, f_m, sub_3, clamp, sum_1, f_lo, mul_1, tau_lo_1, dm_2, tau_m_1, sub_8, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_10, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_12, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_14, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_16, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_18, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_20, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_22, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_24, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_26, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_28, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_30, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_32, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_34, p_m_14, sum_16, f_m_14, mul_15, tau_lo_15, dm_16, tau_m_15, sub_36, p_m_15, sum_17, f_m_15, mul_16, tau_lo_16, dm_17, tau_m_16, sub_38, p_m_16, sum_18, f_m_16, mul_17, tau_lo_17], Original ATen: [aten.max, aten.sub, aten.div, aten.add, aten.clamp, aten.sum, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm => sub_5
# dm_1 => div
# dm_10 => div_9
# dm_11 => div_10
# dm_12 => div_11
# dm_13 => div_12
# dm_14 => div_13
# dm_15 => div_14
# dm_16 => div_15
# dm_17 => div_16
# dm_2 => div_1
# dm_3 => div_2
# dm_4 => div_3
# dm_5 => div_4
# dm_6 => div_5
# dm_7 => div_6
# dm_8 => div_7
# dm_9 => div_8
# f_lo => sub_4
# f_m => sub_7
# f_m_1 => sub_9
# f_m_10 => sub_27
# f_m_11 => sub_29
# f_m_12 => sub_31
# f_m_13 => sub_33
# f_m_14 => sub_35
# f_m_15 => sub_37
# f_m_16 => sub_39
# f_m_2 => sub_11
# f_m_3 => sub_13
# f_m_4 => sub_15
# f_m_5 => sub_17
# f_m_6 => sub_19
# f_m_7 => sub_21
# f_m_8 => sub_23
# f_m_9 => sub_25
# max_1 => max_1
# mul_1 => mul_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_12 => mul_12
# mul_13 => mul_13
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_17 => mul_17
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# p_m => clamp_min_1
# p_m_1 => clamp_min_2
# p_m_10 => clamp_min_11
# p_m_11 => clamp_min_12
# p_m_12 => clamp_min_13
# p_m_13 => clamp_min_14
# p_m_14 => clamp_min_15
# p_m_15 => clamp_min_16
# p_m_16 => clamp_min_17
# p_m_2 => clamp_min_3
# p_m_3 => clamp_min_4
# p_m_4 => clamp_min_5
# p_m_5 => clamp_min_6
# p_m_6 => clamp_min_7
# p_m_7 => clamp_min_8
# p_m_8 => clamp_min_9
# p_m_9 => clamp_min_10
# sub_10 => sub_10
# sub_12 => sub_12
# sub_14 => sub_14
# sub_16 => sub_16
# sub_18 => sub_18
# sub_20 => sub_20
# sub_22 => sub_22
# sub_24 => sub_24
# sub_26 => sub_26
# sub_28 => sub_28
# sub_3 => sub_3
# sub_30 => sub_30
# sub_32 => sub_32
# sub_34 => sub_34
# sub_36 => sub_36
# sub_38 => sub_38
# sub_6 => sub_6
# sub_8 => sub_8
# sum_1 => sum_1
# sum_10 => sum_10
# sum_11 => sum_11
# sum_12 => sum_12
# sum_13 => sum_13
# sum_14 => sum_14
# sum_15 => sum_15
# sum_16 => sum_16
# sum_17 => sum_17
# sum_18 => sum_18
# sum_2 => sum_2
# sum_3 => sum_3
# sum_4 => sum_4
# sum_5 => sum_5
# sum_6 => sum_6
# sum_7 => sum_7
# sum_8 => sum_8
# sum_9 => sum_9
# tau_hi => sub_2
# tau_lo => sub_1
# tau_lo_1 => where
# tau_lo_10 => where_9
# tau_lo_11 => where_10
# tau_lo_12 => where_11
# tau_lo_13 => where_12
# tau_lo_14 => where_13
# tau_lo_15 => where_14
# tau_lo_16 => where_15
# tau_lo_17 => where_16
# tau_lo_2 => where_1
# tau_lo_3 => where_2
# tau_lo_4 => where_3
# tau_lo_5 => where_4
# tau_lo_6 => where_5
# tau_lo_7 => where_6
# tau_lo_8 => where_7
# tau_lo_9 => where_8
# tau_m => add
# tau_m_1 => add_1
# tau_m_10 => add_10
# tau_m_11 => add_11
# tau_m_12 => add_12
# tau_m_13 => add_13
# tau_m_14 => add_14
# tau_m_15 => add_15
# tau_m_16 => add_16
# tau_m_2 => add_2
# tau_m_3 => add_3
# tau_m_4 => add_4
# tau_m_5 => add_5
# tau_m_6 => add_6
# tau_m_7 => add_7
# tau_m_8 => add_8
# tau_m_9 => add_9
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 0.25), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %sub_1), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_5, 2), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %div), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_6, 0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_1, [-1]), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_2, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %sub_4), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze, %add, %sub_1), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %div_1), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_1), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_8, 0), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_2, [-1]), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %sub_4), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_1, %add_1, %where), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %div_2), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_2), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_3, [-1]), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_4, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %sub_4), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %add_2, %where_1), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_2, %div_3), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_3), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_12, 0), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_4, [-1]), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %sub_4), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_3, %add_3, %where_2), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_3, %div_4), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_4), kwargs = {})
# %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_14, 0), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_5, [-1]), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_6, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %sub_4), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_4, %add_4, %where_3), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %div_5), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_5), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_6, [-1]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_7, 1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %sub_4), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_5, %add_5, %where_4), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %div_6), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_6), kwargs = {})
# %clamp_min_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_18, 0), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_7, [-1]), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_8, 1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %sub_4), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_6, %add_6, %where_5), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_6, %div_7), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_7), kwargs = {})
# %clamp_min_8 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_20, 0), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_8, [-1]), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_9, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %sub_4), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_7, %add_7, %where_6), kwargs = {})
# %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_7, %div_8), kwargs = {})
# %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_8), kwargs = {})
# %clamp_min_9 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_22, 0), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_9, [-1]), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_10, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %sub_4), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_8, %add_8, %where_7), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_8, %div_9), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_9), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_24, 0), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_10, [-1]), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_11, 1), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %sub_4), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_9, %add_9, %where_8), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_9, %div_10), kwargs = {})
# %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_10), kwargs = {})
# %clamp_min_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_26, 0), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_11, [-1]), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_12, 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %sub_4), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_10, %add_10, %where_9), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_10, %div_11), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_11), kwargs = {})
# %clamp_min_12 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_28, 0), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_12, [-1]), kwargs = {})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_13, 1), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %sub_4), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_11, %add_11, %where_10), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_11, %div_12), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_12), kwargs = {})
# %clamp_min_13 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_30, 0), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_13, [-1]), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_14, 1), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %sub_4), kwargs = {})
# %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_12, %add_12, %where_11), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_12, %div_13), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_13), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_32, 0), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_14, [-1]), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_15, 1), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %sub_4), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_13, %add_13, %where_12), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_13, %div_14), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_14), kwargs = {})
# %clamp_min_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_34, 0), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_15, [-1]), kwargs = {})
# %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_16, 1), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %sub_4), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_14, %add_14, %where_13), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_14, %div_15), kwargs = {})
# %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_15), kwargs = {})
# %clamp_min_16 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_36, 0), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_16, [-1]), kwargs = {})
# %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_17, 1), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_37, %sub_4), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_15, %add_15, %where_14), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_15, %div_16), kwargs = {})
# %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_16), kwargs = {})
# %clamp_min_17 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_38, 0), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_17, [-1]), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_18, 1), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %sub_4), kwargs = {})
# %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_16, %add_16, %where_15), kwargs = {})
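# The kernel below evaluates all unrolled steps in registers: each row's
# four logits are loaded once, dm is halved step by step (tmp13, tmp43,
# tmp60, ...), and tau_lo advances only where f_m * f_lo >= 0.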
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr8'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0(in_out_ptr8, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
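    # tmp6 = row max; tau_lo = max - 1 (tmp8), tau_hi = max - 1/4 (tmp10);
    # every repeated block below is one bisection step.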
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp9 = 0.25
tmp10 = tmp6 - tmp9
tmp11 = tmp10 - tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp8 + tmp13
tmp15 = tmp0 - tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp1 - tmp14
tmp19 = triton_helpers.maximum(tmp18, tmp16)
tmp20 = tmp17 + tmp19
tmp21 = tmp3 - tmp14
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = tmp20 + tmp22
tmp24 = tmp5 - tmp14
tmp25 = triton_helpers.maximum(tmp24, tmp16)
tmp26 = tmp23 + tmp25
tmp27 = tmp26 - tmp7
tmp28 = tmp0 - tmp8
tmp29 = triton_helpers.maximum(tmp28, tmp16)
tmp30 = tmp1 - tmp8
tmp31 = triton_helpers.maximum(tmp30, tmp16)
tmp32 = tmp29 + tmp31
tmp33 = tmp3 - tmp8
tmp34 = triton_helpers.maximum(tmp33, tmp16)
tmp35 = tmp32 + tmp34
tmp36 = tmp5 - tmp8
tmp37 = triton_helpers.maximum(tmp36, tmp16)
tmp38 = tmp35 + tmp37
tmp39 = tmp38 - tmp7
tmp40 = tmp27 * tmp39
tmp41 = tmp40 >= tmp16
tmp42 = tl.where(tmp41, tmp14, tmp8)
tmp43 = tmp13 * tmp12
tmp44 = tmp42 + tmp43
tmp45 = tmp0 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp16)
tmp47 = tmp1 - tmp44
tmp48 = triton_helpers.maximum(tmp47, tmp16)
tmp49 = tmp46 + tmp48
tmp50 = tmp3 - tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp16)
tmp52 = tmp49 + tmp51
tmp53 = tmp5 - tmp44
tmp54 = triton_helpers.maximum(tmp53, tmp16)
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp7
tmp57 = tmp56 * tmp39
tmp58 = tmp57 >= tmp16
tmp59 = tl.where(tmp58, tmp44, tmp42)
tmp60 = tmp43 * tmp12
tmp61 = tmp59 + tmp60
tmp62 = tmp0 - tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp16)
tmp64 = tmp1 - tmp61
tmp65 = triton_helpers.maximum(tmp64, tmp16)
tmp66 = tmp63 + tmp65
tmp67 = tmp3 - tmp61
tmp68 = triton_helpers.maximum(tmp67, tmp16)
tmp69 = tmp66 + tmp68
tmp70 = tmp5 - tmp61
tmp71 = triton_helpers.maximum(tmp70, tmp16)
tmp72 = tmp69 + tmp71
tmp73 = tmp72 - tmp7
tmp74 = tmp73 * tmp39
tmp75 = tmp74 >= tmp16
tmp76 = tl.where(tmp75, tmp61, tmp59)
tmp77 = tmp60 * tmp12
tmp78 = tmp76 + tmp77
tmp79 = tmp0 - tmp78
tmp80 = triton_helpers.maximum(tmp79, tmp16)
tmp81 = tmp1 - tmp78
tmp82 = triton_helpers.maximum(tmp81, tmp16)
tmp83 = tmp80 + tmp82
tmp84 = tmp3 - tmp78
tmp85 = triton_helpers.maximum(tmp84, tmp16)
tmp86 = tmp83 + tmp85
tmp87 = tmp5 - tmp78
tmp88 = triton_helpers.maximum(tmp87, tmp16)
tmp89 = tmp86 + tmp88
tmp90 = tmp89 - tmp7
tmp91 = tmp90 * tmp39
tmp92 = tmp91 >= tmp16
tmp93 = tl.where(tmp92, tmp78, tmp76)
tmp94 = tmp77 * tmp12
tmp95 = tmp93 + tmp94
tmp96 = tmp0 - tmp95
tmp97 = triton_helpers.maximum(tmp96, tmp16)
tmp98 = tmp1 - tmp95
tmp99 = triton_helpers.maximum(tmp98, tmp16)
tmp100 = tmp97 + tmp99
tmp101 = tmp3 - tmp95
tmp102 = triton_helpers.maximum(tmp101, tmp16)
tmp103 = tmp100 + tmp102
tmp104 = tmp5 - tmp95
tmp105 = triton_helpers.maximum(tmp104, tmp16)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 - tmp7
tmp108 = tmp107 * tmp39
tmp109 = tmp108 >= tmp16
tmp110 = tl.where(tmp109, tmp95, tmp93)
tmp111 = tmp94 * tmp12
tmp112 = tmp110 + tmp111
tmp113 = tmp0 - tmp112
tmp114 = triton_helpers.maximum(tmp113, tmp16)
tmp115 = tmp1 - tmp112
tmp116 = triton_helpers.maximum(tmp115, tmp16)
tmp117 = tmp114 + tmp116
tmp118 = tmp3 - tmp112
tmp119 = triton_helpers.maximum(tmp118, tmp16)
tmp120 = tmp117 + tmp119
tmp121 = tmp5 - tmp112
tmp122 = triton_helpers.maximum(tmp121, tmp16)
tmp123 = tmp120 + tmp122
tmp124 = tmp123 - tmp7
tmp125 = tmp124 * tmp39
tmp126 = tmp125 >= tmp16
tmp127 = tl.where(tmp126, tmp112, tmp110)
tmp128 = tmp111 * tmp12
tmp129 = tmp127 + tmp128
tmp130 = tmp0 - tmp129
tmp131 = triton_helpers.maximum(tmp130, tmp16)
tmp132 = tmp1 - tmp129
tmp133 = triton_helpers.maximum(tmp132, tmp16)
tmp134 = tmp131 + tmp133
tmp135 = tmp3 - tmp129
tmp136 = triton_helpers.maximum(tmp135, tmp16)
tmp137 = tmp134 + tmp136
tmp138 = tmp5 - tmp129
tmp139 = triton_helpers.maximum(tmp138, tmp16)
tmp140 = tmp137 + tmp139
tmp141 = tmp140 - tmp7
tmp142 = tmp141 * tmp39
tmp143 = tmp142 >= tmp16
tmp144 = tl.where(tmp143, tmp129, tmp127)
tmp145 = tmp128 * tmp12
tmp146 = tmp144 + tmp145
tmp147 = tmp0 - tmp146
tmp148 = triton_helpers.maximum(tmp147, tmp16)
tmp149 = tmp1 - tmp146
tmp150 = triton_helpers.maximum(tmp149, tmp16)
tmp151 = tmp148 + tmp150
tmp152 = tmp3 - tmp146
tmp153 = triton_helpers.maximum(tmp152, tmp16)
tmp154 = tmp151 + tmp153
tmp155 = tmp5 - tmp146
tmp156 = triton_helpers.maximum(tmp155, tmp16)
tmp157 = tmp154 + tmp156
tmp158 = tmp157 - tmp7
tmp159 = tmp158 * tmp39
tmp160 = tmp159 >= tmp16
tmp161 = tl.where(tmp160, tmp146, tmp144)
tmp162 = tmp145 * tmp12
tmp163 = tmp161 + tmp162
tmp164 = tmp0 - tmp163
tmp165 = triton_helpers.maximum(tmp164, tmp16)
tmp166 = tmp1 - tmp163
tmp167 = triton_helpers.maximum(tmp166, tmp16)
tmp168 = tmp165 + tmp167
tmp169 = tmp3 - tmp163
tmp170 = triton_helpers.maximum(tmp169, tmp16)
tmp171 = tmp168 + tmp170
tmp172 = tmp5 - tmp163
tmp173 = triton_helpers.maximum(tmp172, tmp16)
tmp174 = tmp171 + tmp173
tmp175 = tmp174 - tmp7
tmp176 = tmp175 * tmp39
tmp177 = tmp176 >= tmp16
tmp178 = tl.where(tmp177, tmp163, tmp161)
tmp179 = tmp162 * tmp12
tmp180 = tmp178 + tmp179
tmp181 = tmp0 - tmp180
tmp182 = triton_helpers.maximum(tmp181, tmp16)
tmp183 = tmp1 - tmp180
tmp184 = triton_helpers.maximum(tmp183, tmp16)
tmp185 = tmp182 + tmp184
tmp186 = tmp3 - tmp180
tmp187 = triton_helpers.maximum(tmp186, tmp16)
tmp188 = tmp185 + tmp187
tmp189 = tmp5 - tmp180
tmp190 = triton_helpers.maximum(tmp189, tmp16)
tmp191 = tmp188 + tmp190
tmp192 = tmp191 - tmp7
tmp193 = tmp192 * tmp39
tmp194 = tmp193 >= tmp16
tmp195 = tl.where(tmp194, tmp180, tmp178)
tmp196 = tmp179 * tmp12
tmp197 = tmp195 + tmp196
tmp198 = tmp0 - tmp197
tmp199 = triton_helpers.maximum(tmp198, tmp16)
tmp200 = tmp1 - tmp197
tmp201 = triton_helpers.maximum(tmp200, tmp16)
tmp202 = tmp199 + tmp201
tmp203 = tmp3 - tmp197
tmp204 = triton_helpers.maximum(tmp203, tmp16)
tmp205 = tmp202 + tmp204
tmp206 = tmp5 - tmp197
tmp207 = triton_helpers.maximum(tmp206, tmp16)
tmp208 = tmp205 + tmp207
tmp209 = tmp208 - tmp7
tmp210 = tmp209 * tmp39
tmp211 = tmp210 >= tmp16
tmp212 = tl.where(tmp211, tmp197, tmp195)
tmp213 = tmp196 * tmp12
tmp214 = tmp212 + tmp213
tmp215 = tmp0 - tmp214
tmp216 = triton_helpers.maximum(tmp215, tmp16)
tmp217 = tmp1 - tmp214
tmp218 = triton_helpers.maximum(tmp217, tmp16)
tmp219 = tmp216 + tmp218
tmp220 = tmp3 - tmp214
tmp221 = triton_helpers.maximum(tmp220, tmp16)
tmp222 = tmp219 + tmp221
tmp223 = tmp5 - tmp214
tmp224 = triton_helpers.maximum(tmp223, tmp16)
tmp225 = tmp222 + tmp224
tmp226 = tmp225 - tmp7
tmp227 = tmp226 * tmp39
tmp228 = tmp227 >= tmp16
tmp229 = tl.where(tmp228, tmp214, tmp212)
tmp230 = tmp213 * tmp12
tmp231 = tmp229 + tmp230
tmp232 = tmp0 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp16)
tmp234 = tmp1 - tmp231
tmp235 = triton_helpers.maximum(tmp234, tmp16)
tmp236 = tmp233 + tmp235
tmp237 = tmp3 - tmp231
tmp238 = triton_helpers.maximum(tmp237, tmp16)
tmp239 = tmp236 + tmp238
tmp240 = tmp5 - tmp231
tmp241 = triton_helpers.maximum(tmp240, tmp16)
tmp242 = tmp239 + tmp241
tmp243 = tmp242 - tmp7
tmp244 = tmp243 * tmp39
tmp245 = tmp244 >= tmp16
tmp246 = tl.where(tmp245, tmp231, tmp229)
tmp247 = tmp230 * tmp12
tmp248 = tmp246 + tmp247
tmp249 = tmp0 - tmp248
tmp250 = triton_helpers.maximum(tmp249, tmp16)
tmp251 = tmp1 - tmp248
tmp252 = triton_helpers.maximum(tmp251, tmp16)
tmp253 = tmp250 + tmp252
tmp254 = tmp3 - tmp248
tmp255 = triton_helpers.maximum(tmp254, tmp16)
tmp256 = tmp253 + tmp255
tmp257 = tmp5 - tmp248
tmp258 = triton_helpers.maximum(tmp257, tmp16)
tmp259 = tmp256 + tmp258
tmp260 = tmp259 - tmp7
tmp261 = tmp260 * tmp39
tmp262 = tmp261 >= tmp16
tmp263 = tl.where(tmp262, tmp248, tmp246)
tmp264 = tmp247 * tmp12
tmp265 = tmp263 + tmp264
tmp266 = tmp0 - tmp265
tmp267 = triton_helpers.maximum(tmp266, tmp16)
tmp268 = tmp1 - tmp265
tmp269 = triton_helpers.maximum(tmp268, tmp16)
tmp270 = tmp267 + tmp269
tmp271 = tmp3 - tmp265
tmp272 = triton_helpers.maximum(tmp271, tmp16)
tmp273 = tmp270 + tmp272
tmp274 = tmp5 - tmp265
tmp275 = triton_helpers.maximum(tmp274, tmp16)
tmp276 = tmp273 + tmp275
tmp277 = tmp276 - tmp7
tmp278 = tmp277 * tmp39
tmp279 = tmp278 >= tmp16
tmp280 = tl.where(tmp279, tmp265, tmp263)
tmp281 = tmp264 * tmp12
tmp282 = tmp280 + tmp281
tmp283 = tmp0 - tmp282
tmp284 = triton_helpers.maximum(tmp283, tmp16)
tmp285 = tmp1 - tmp282
tmp286 = triton_helpers.maximum(tmp285, tmp16)
tmp287 = tmp284 + tmp286
tmp288 = tmp3 - tmp282
tmp289 = triton_helpers.maximum(tmp288, tmp16)
tmp290 = tmp287 + tmp289
tmp291 = tmp5 - tmp282
tmp292 = triton_helpers.maximum(tmp291, tmp16)
tmp293 = tmp290 + tmp292
tmp294 = tmp293 - tmp7
tmp295 = tmp294 * tmp39
tmp296 = tmp295 >= tmp16
tmp297 = tl.where(tmp296, tmp282, tmp280)
tmp298 = tmp281 * tmp12
tmp299 = tmp297 + tmp298
tmp300 = tmp0 - tmp299
tmp301 = triton_helpers.maximum(tmp300, tmp16)
tmp302 = tmp1 - tmp299
tmp303 = triton_helpers.maximum(tmp302, tmp16)
tmp304 = tmp301 + tmp303
tmp305 = tmp3 - tmp299
tmp306 = triton_helpers.maximum(tmp305, tmp16)
tmp307 = tmp304 + tmp306
tmp308 = tmp5 - tmp299
tmp309 = triton_helpers.maximum(tmp308, tmp16)
tmp310 = tmp307 + tmp309
tmp311 = tmp310 - tmp7
tmp312 = tmp311 * tmp39
tmp313 = tmp312 >= tmp16
tmp314 = tl.where(tmp313, tmp299, tmp297)
tl.store(in_out_ptr8 + (x0), tmp314, xmask)
''', device_str='cuda')
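
# The unrolled kernel above appears to be the first half of a bisection search for
# the sparsemax threshold tau: starting from tau_lo = max(x) - 1 and
# tau_hi = max(x) - 1/4 (last dimension of size 4), it halves the interval 17
# times, keeping tau_m as the new lower bound whenever
# f(tau_m) = sum(clamp(x - tau_m, 0)) - 1 has the same sign as f(tau_lo).
# A minimal eager-mode sketch of that recurrence, assuming this reading of the
# fused ops (the function below is illustrative and not part of the generated
# module; it relies on the module-level `import torch`):
def _sparsemax_bisect_sketch(x, n_iter=17):
    # x: (..., 4) scores; returns the running lower bisection bound tau_lo
    mx = x.max(dim=-1, keepdim=True).values
    tau_lo = mx - 1.0
    dm = (mx - 0.25) - tau_lo  # tau_hi - tau_lo
    f = lambda tau: torch.clamp(x - tau, min=0.0).sum(dim=-1, keepdim=True) - 1.0
    f_lo = f(tau_lo)
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        # move the lower bound up when f(tau_m) has the same sign as f_lo
        tau_lo = torch.where(f(tau_m) * f_lo >= 0, tau_m, tau_lo)
    return tau_lo
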
# kernel path: runs/run_shard_0/inductor_cache/av/cavd5cj3tg3vokgvnwac6qqbcheawux6j46atz5eq3ovasf7jkxk.py
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, sub_3, clamp, sum_1, f_lo, dm_2, dm_3, dm_4, dm_5, dm_6, dm_7, dm_8, dm_9, dm_10, dm_11, dm_12, dm_13, dm_14, dm_15, dm_16, dm_17, dm_18, tau_m_17, sub_40, p_m_17, sum_19, f_m_17, mul_18, tau_lo_18, dm_19, tau_m_18, sub_42, p_m_18, sum_20, f_m_18, mul_19, tau_lo_19, dm_20, tau_m_19, sub_44, p_m_19, sum_21, f_m_19, tau_lo_20, dm_21, tau_m_20, sub_46, p_m_20, sum_22, f_m_20, mul_21, tau_lo_21, dm_22, tau_m_21, sub_48, p_m_21, sum_23, f_m_21, mul_22, tau_lo_22, dm_23, tau_m_22, sub_50, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_52, p_m_23, sum_25, f_m_23, mul_24, tau_lo_24, dm_25, tau_m_24, sub_54, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_56, p_m_25, sum_27, f_m_25, mul_26, tau_lo_26, dm_27, tau_m_26, sub_58, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_60, p_m_27, sum_29, f_m_27, tau_lo_28, dm_29, tau_m_28, sub_62, p_m_28, sum_30, tau_lo_29, dm_30, tau_m_29, sub_64, p_m_29, sum_31, tau_lo_30, dm_31, tau_m_30, sub_66, p_m_30, sum_32, tau_lo_31, dm_32, tau_m_31, sub_68, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_70, p_m_32, sum_34, tau_lo_33, dm_34, tau_m_33, sub_72, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_74, p_m_34, sum_36, tau_lo_35, dm_36, tau_m_35, sub_76, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_78, p_m_36, sum_38, tau_lo_37, dm_38, tau_m_37, sub_80, p_m_37, sum_39, tau_lo_38, dm_39, tau_m_38, sub_82, p_m_38, sum_40, tau_lo_39, dm_40, tau_m_39, sub_84, p_m_39, sum_41, tau_lo_40], Original ATen: [aten.max, aten.sub, aten.div, aten.clamp, aten.sum, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm => sub_5
# dm_1 => div
# dm_10 => div_9
# dm_11 => div_10
# dm_12 => div_11
# dm_13 => div_12
# dm_14 => div_13
# dm_15 => div_14
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_2 => div_1
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_3 => div_2
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_4 => div_3
# dm_40 => div_39
# dm_5 => div_4
# dm_6 => div_5
# dm_7 => div_6
# dm_8 => div_7
# dm_9 => div_8
# f_lo => sub_4
# f_m_17 => sub_41
# f_m_18 => sub_43
# f_m_19 => sub_45
# f_m_20 => sub_47
# f_m_21 => sub_49
# f_m_23 => sub_53
# f_m_25 => sub_57
# f_m_27 => sub_61
# max_1 => max_1
# mul_18 => mul_18
# mul_19 => mul_19
# mul_21 => mul_21
# mul_22 => mul_22
# mul_24 => mul_24
# mul_26 => mul_26
# p_m_17 => clamp_min_18
# p_m_18 => clamp_min_19
# p_m_19 => clamp_min_20
# p_m_20 => clamp_min_21
# p_m_21 => clamp_min_22
# p_m_22 => clamp_min_23
# p_m_23 => clamp_min_24
# p_m_24 => clamp_min_25
# p_m_25 => clamp_min_26
# p_m_26 => clamp_min_27
# p_m_27 => clamp_min_28
# p_m_28 => clamp_min_29
# p_m_29 => clamp_min_30
# p_m_30 => clamp_min_31
# p_m_31 => clamp_min_32
# p_m_32 => clamp_min_33
# p_m_33 => clamp_min_34
# p_m_34 => clamp_min_35
# p_m_35 => clamp_min_36
# p_m_36 => clamp_min_37
# p_m_37 => clamp_min_38
# p_m_38 => clamp_min_39
# p_m_39 => clamp_min_40
# sub_3 => sub_3
# sub_40 => sub_40
# sub_42 => sub_42
# sub_44 => sub_44
# sub_46 => sub_46
# sub_48 => sub_48
# sub_50 => sub_50
# sub_52 => sub_52
# sub_54 => sub_54
# sub_56 => sub_56
# sub_58 => sub_58
# sub_60 => sub_60
# sub_62 => sub_62
# sub_64 => sub_64
# sub_66 => sub_66
# sub_68 => sub_68
# sub_70 => sub_70
# sub_72 => sub_72
# sub_74 => sub_74
# sub_76 => sub_76
# sub_78 => sub_78
# sub_80 => sub_80
# sub_82 => sub_82
# sub_84 => sub_84
# sum_1 => sum_1
# sum_19 => sum_19
# sum_20 => sum_20
# sum_21 => sum_21
# sum_22 => sum_22
# sum_23 => sum_23
# sum_24 => sum_24
# sum_25 => sum_25
# sum_26 => sum_26
# sum_27 => sum_27
# sum_28 => sum_28
# sum_29 => sum_29
# sum_30 => sum_30
# sum_31 => sum_31
# sum_32 => sum_32
# sum_33 => sum_33
# sum_34 => sum_34
# sum_35 => sum_35
# sum_36 => sum_36
# sum_37 => sum_37
# sum_38 => sum_38
# sum_39 => sum_39
# sum_40 => sum_40
# sum_41 => sum_41
# tau_hi => sub_2
# tau_lo => sub_1
# tau_lo_18 => where_17
# tau_lo_19 => where_18
# tau_lo_20 => where_19
# tau_lo_21 => where_20
# tau_lo_22 => where_21
# tau_lo_23 => where_22
# tau_lo_24 => where_23
# tau_lo_25 => where_24
# tau_lo_26 => where_25
# tau_lo_27 => where_26
# tau_lo_28 => where_27
# tau_lo_29 => where_28
# tau_lo_30 => where_29
# tau_lo_31 => where_30
# tau_lo_32 => where_31
# tau_lo_33 => where_32
# tau_lo_34 => where_33
# tau_lo_35 => where_34
# tau_lo_36 => where_35
# tau_lo_37 => where_36
# tau_lo_38 => where_37
# tau_lo_39 => where_38
# tau_lo_40 => where_39
# tau_m_17 => add_17
# tau_m_18 => add_18
# tau_m_19 => add_19
# tau_m_20 => add_20
# tau_m_21 => add_21
# tau_m_22 => add_22
# tau_m_23 => add_23
# tau_m_24 => add_24
# tau_m_25 => add_25
# tau_m_26 => add_26
# tau_m_27 => add_27
# tau_m_28 => add_28
# tau_m_29 => add_29
# tau_m_30 => add_30
# tau_m_31 => add_31
# tau_m_32 => add_32
# tau_m_33 => add_33
# tau_m_34 => add_34
# tau_m_35 => add_35
# tau_m_36 => add_36
# tau_m_37 => add_37
# tau_m_38 => add_38
# tau_m_39 => add_39
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 0.25), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %sub_1), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_5, 2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {})
# %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_16, %div_17), kwargs = {})
# %sub_40 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_17), kwargs = {})
# %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_40, 0), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_18, [-1]), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_19, 1), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %sub_4), kwargs = {})
# %where_17 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_17, %add_17, %where_16), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_17, %div_18), kwargs = {})
# %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_18), kwargs = {})
# %clamp_min_19 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_42, 0), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_19, [-1]), kwargs = {})
# %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_20, 1), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_43, %sub_4), kwargs = {})
# %where_18 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_18, %add_18, %where_17), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_18, %div_19), kwargs = {})
# %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_19), kwargs = {})
# %clamp_min_20 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_44, 0), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_20, [-1]), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_21, 1), kwargs = {})
# %where_19 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_19, %add_19, %where_18), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_19, %div_20), kwargs = {})
# %sub_46 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_20), kwargs = {})
# %clamp_min_21 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_46, 0), kwargs = {})
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_21, [-1]), kwargs = {})
# %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_22, 1), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %sub_4), kwargs = {})
# %where_20 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_20, %add_20, %where_19), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %add_21 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_20, %div_21), kwargs = {})
# %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_21), kwargs = {})
# %clamp_min_22 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_48, 0), kwargs = {})
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_22, [-1]), kwargs = {})
# %sub_49 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_23, 1), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_49, %sub_4), kwargs = {})
# %where_21 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_21, %add_21, %where_20), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %add_22 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_21, %div_22), kwargs = {})
# %sub_50 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_22), kwargs = {})
# %clamp_min_23 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_50, 0), kwargs = {})
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_23, [-1]), kwargs = {})
# %where_22 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_22, %add_22, %where_21), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %add_23 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_22, %div_23), kwargs = {})
# %sub_52 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_23), kwargs = {})
# %clamp_min_24 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_52, 0), kwargs = {})
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_24, [-1]), kwargs = {})
# %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_25, 1), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_53, %sub_4), kwargs = {})
# %where_23 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_23, %add_23, %where_22), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_23, %div_24), kwargs = {})
# %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_24), kwargs = {})
# %clamp_min_25 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_54, 0), kwargs = {})
# %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_25, [-1]), kwargs = {})
# %where_24 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_24, %add_24, %where_23), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {})
# %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_25), kwargs = {})
# %clamp_min_26 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_56, 0), kwargs = {})
# %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_26, [-1]), kwargs = {})
# %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_27, 1), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %sub_4), kwargs = {})
# %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {})
# %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_26), kwargs = {})
# %clamp_min_27 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_58, 0), kwargs = {})
# %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_27, [-1]), kwargs = {})
# %where_26 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_26, %add_26, %where_25), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {})
# %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_27), kwargs = {})
# %clamp_min_28 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_60, 0), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_28, [-1]), kwargs = {})
# %sub_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_29, 1), kwargs = {})
# %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {})
# %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_28), kwargs = {})
# %clamp_min_29 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_62, 0), kwargs = {})
# %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_29, [-1]), kwargs = {})
# %where_28 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_28, %add_28, %where_27), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {})
# %sub_64 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_29), kwargs = {})
# %clamp_min_30 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_64, 0), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_30, [-1]), kwargs = {})
# %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {})
# %sub_66 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_30), kwargs = {})
# %clamp_min_31 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_66, 0), kwargs = {})
# %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_31, [-1]), kwargs = {})
# %where_30 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_30, %add_30, %where_29), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {})
# %sub_68 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_31), kwargs = {})
# %clamp_min_32 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_68, 0), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_32, [-1]), kwargs = {})
# %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_31, %add_31, %where_30), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_31, %div_32), kwargs = {})
# %sub_70 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_32), kwargs = {})
# %clamp_min_33 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_70, 0), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_33, [-1]), kwargs = {})
# %where_32 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_32, %add_32, %where_31), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %add_33 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_32, %div_33), kwargs = {})
# %sub_72 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_33), kwargs = {})
# %clamp_min_34 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_72, 0), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_34, [-1]), kwargs = {})
# %where_33 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_33, %add_33, %where_32), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {})
# %sub_74 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_34), kwargs = {})
# %clamp_min_35 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_74, 0), kwargs = {})
# %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_35, [-1]), kwargs = {})
# %where_34 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_34, %add_34, %where_33), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {})
# %sub_76 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_35), kwargs = {})
# %clamp_min_36 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_76, 0), kwargs = {})
# %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_36, [-1]), kwargs = {})
# %where_35 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_35, %add_35, %where_34), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {})
# %sub_78 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_36), kwargs = {})
# %clamp_min_37 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_78, 0), kwargs = {})
# %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_37, [-1]), kwargs = {})
# %where_36 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_36, %add_36, %where_35), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {})
# %sub_80 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_37), kwargs = {})
# %clamp_min_38 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_80, 0), kwargs = {})
# %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_38, [-1]), kwargs = {})
# %where_37 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_37, %add_37, %where_36), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {})
# %sub_82 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_38), kwargs = {})
# %clamp_min_39 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_82, 0), kwargs = {})
# %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_39, [-1]), kwargs = {})
# %where_38 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_38, %add_38, %where_37), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
# %sub_84 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_39), kwargs = {})
# %clamp_min_40 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_84, 0), kwargs = {})
# %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_40, [-1]), kwargs = {})
# %where_39 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_39, %add_39, %where_38), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1', 'mutated_arg_names': ['in_out_ptr16'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1(in_out_ptr16, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.25
tmp8 = tmp6 - tmp7
tmp9 = 1.0
tmp10 = tmp6 - tmp9
tmp11 = tmp8 - tmp10
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp12
tmp15 = tmp14 * tmp12
tmp16 = tmp15 * tmp12
tmp17 = tmp16 * tmp12
tmp18 = tmp17 * tmp12
tmp19 = tmp18 * tmp12
tmp20 = tmp19 * tmp12
tmp21 = tmp20 * tmp12
tmp22 = tmp21 * tmp12
tmp23 = tmp22 * tmp12
tmp24 = tmp23 * tmp12
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp12
tmp27 = tmp26 * tmp12
tmp28 = tmp27 * tmp12
tmp29 = tmp28 * tmp12
tmp30 = tmp29 * tmp12
tmp32 = tmp31 + tmp30
tmp33 = tmp0 - tmp32
tmp34 = 0.0
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp1 - tmp32
tmp37 = triton_helpers.maximum(tmp36, tmp34)
tmp38 = tmp35 + tmp37
tmp39 = tmp3 - tmp32
tmp40 = triton_helpers.maximum(tmp39, tmp34)
tmp41 = tmp38 + tmp40
tmp42 = tmp5 - tmp32
tmp43 = triton_helpers.maximum(tmp42, tmp34)
tmp44 = tmp41 + tmp43
tmp45 = tmp44 - tmp9
tmp46 = tmp0 - tmp10
tmp47 = triton_helpers.maximum(tmp46, tmp34)
tmp48 = tmp1 - tmp10
tmp49 = triton_helpers.maximum(tmp48, tmp34)
tmp50 = tmp47 + tmp49
tmp51 = tmp3 - tmp10
tmp52 = triton_helpers.maximum(tmp51, tmp34)
tmp53 = tmp50 + tmp52
tmp54 = tmp5 - tmp10
tmp55 = triton_helpers.maximum(tmp54, tmp34)
tmp56 = tmp53 + tmp55
tmp57 = tmp56 - tmp9
tmp58 = tmp45 * tmp57
tmp59 = tmp58 >= tmp34
tmp60 = tl.where(tmp59, tmp32, tmp31)
tmp61 = tmp30 * tmp12
tmp62 = tmp60 + tmp61
tmp63 = tmp0 - tmp62
tmp64 = triton_helpers.maximum(tmp63, tmp34)
tmp65 = tmp1 - tmp62
tmp66 = triton_helpers.maximum(tmp65, tmp34)
tmp67 = tmp64 + tmp66
tmp68 = tmp3 - tmp62
tmp69 = triton_helpers.maximum(tmp68, tmp34)
tmp70 = tmp67 + tmp69
tmp71 = tmp5 - tmp62
tmp72 = triton_helpers.maximum(tmp71, tmp34)
tmp73 = tmp70 + tmp72
tmp74 = tmp73 - tmp9
tmp75 = tmp74 * tmp57
tmp76 = tmp75 >= tmp34
tmp77 = tl.where(tmp76, tmp62, tmp60)
tmp78 = tmp61 * tmp12
tmp79 = tmp77 + tmp78
tmp80 = tmp0 - tmp79
tmp81 = triton_helpers.maximum(tmp80, tmp34)
tmp82 = tmp1 - tmp79
tmp83 = triton_helpers.maximum(tmp82, tmp34)
tmp84 = tmp81 + tmp83
tmp85 = tmp3 - tmp79
tmp86 = triton_helpers.maximum(tmp85, tmp34)
tmp87 = tmp84 + tmp86
tmp88 = tmp5 - tmp79
tmp89 = triton_helpers.maximum(tmp88, tmp34)
tmp90 = tmp87 + tmp89
tmp91 = tmp90 - tmp9
tmp92 = tmp91 * tmp57
tmp93 = tmp92 >= tmp34
tmp94 = tl.where(tmp93, tmp79, tmp77)
tmp95 = tmp78 * tmp12
tmp96 = tmp94 + tmp95
tmp97 = tmp0 - tmp96
tmp98 = triton_helpers.maximum(tmp97, tmp34)
tmp99 = tmp1 - tmp96
tmp100 = triton_helpers.maximum(tmp99, tmp34)
tmp101 = tmp98 + tmp100
tmp102 = tmp3 - tmp96
tmp103 = triton_helpers.maximum(tmp102, tmp34)
tmp104 = tmp101 + tmp103
tmp105 = tmp5 - tmp96
tmp106 = triton_helpers.maximum(tmp105, tmp34)
tmp107 = tmp104 + tmp106
tmp108 = tmp107 - tmp9
tmp109 = tmp108 * tmp57
tmp110 = tmp109 >= tmp34
tmp111 = tl.where(tmp110, tmp96, tmp94)
tmp112 = tmp95 * tmp12
tmp113 = tmp111 + tmp112
tmp114 = tmp0 - tmp113
tmp115 = triton_helpers.maximum(tmp114, tmp34)
tmp116 = tmp1 - tmp113
tmp117 = triton_helpers.maximum(tmp116, tmp34)
tmp118 = tmp115 + tmp117
tmp119 = tmp3 - tmp113
tmp120 = triton_helpers.maximum(tmp119, tmp34)
tmp121 = tmp118 + tmp120
tmp122 = tmp5 - tmp113
tmp123 = triton_helpers.maximum(tmp122, tmp34)
tmp124 = tmp121 + tmp123
tmp125 = tmp124 - tmp9
tmp126 = tmp125 * tmp57
tmp127 = tmp126 >= tmp34
tmp128 = tl.where(tmp127, tmp113, tmp111)
tmp129 = tmp112 * tmp12
tmp130 = tmp128 + tmp129
tmp131 = tmp0 - tmp130
tmp132 = triton_helpers.maximum(tmp131, tmp34)
tmp133 = tmp1 - tmp130
tmp134 = triton_helpers.maximum(tmp133, tmp34)
tmp135 = tmp132 + tmp134
tmp136 = tmp3 - tmp130
tmp137 = triton_helpers.maximum(tmp136, tmp34)
tmp138 = tmp135 + tmp137
tmp139 = tmp5 - tmp130
tmp140 = triton_helpers.maximum(tmp139, tmp34)
tmp141 = tmp138 + tmp140
tmp142 = tmp141 - tmp9
tmp143 = tmp142 * tmp57
tmp144 = tmp143 >= tmp34
tmp145 = tl.where(tmp144, tmp130, tmp128)
tmp146 = tmp129 * tmp12
tmp147 = tmp145 + tmp146
tmp148 = tmp0 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp34)
tmp150 = tmp1 - tmp147
tmp151 = triton_helpers.maximum(tmp150, tmp34)
tmp152 = tmp149 + tmp151
tmp153 = tmp3 - tmp147
tmp154 = triton_helpers.maximum(tmp153, tmp34)
tmp155 = tmp152 + tmp154
tmp156 = tmp5 - tmp147
tmp157 = triton_helpers.maximum(tmp156, tmp34)
tmp158 = tmp155 + tmp157
tmp159 = tmp158 - tmp9
tmp160 = tmp159 * tmp57
tmp161 = tmp160 >= tmp34
tmp162 = tl.where(tmp161, tmp147, tmp145)
tmp163 = tmp146 * tmp12
tmp164 = tmp162 + tmp163
tmp165 = tmp0 - tmp164
tmp166 = triton_helpers.maximum(tmp165, tmp34)
tmp167 = tmp1 - tmp164
tmp168 = triton_helpers.maximum(tmp167, tmp34)
tmp169 = tmp166 + tmp168
tmp170 = tmp3 - tmp164
tmp171 = triton_helpers.maximum(tmp170, tmp34)
tmp172 = tmp169 + tmp171
tmp173 = tmp5 - tmp164
tmp174 = triton_helpers.maximum(tmp173, tmp34)
tmp175 = tmp172 + tmp174
tmp176 = tmp175 - tmp9
tmp177 = tmp176 * tmp57
tmp178 = tmp177 >= tmp34
tmp179 = tl.where(tmp178, tmp164, tmp162)
tmp180 = tmp163 * tmp12
tmp181 = tmp179 + tmp180
tmp182 = tmp0 - tmp181
tmp183 = triton_helpers.maximum(tmp182, tmp34)
tmp184 = tmp1 - tmp181
tmp185 = triton_helpers.maximum(tmp184, tmp34)
tmp186 = tmp183 + tmp185
tmp187 = tmp3 - tmp181
tmp188 = triton_helpers.maximum(tmp187, tmp34)
tmp189 = tmp186 + tmp188
tmp190 = tmp5 - tmp181
tmp191 = triton_helpers.maximum(tmp190, tmp34)
tmp192 = tmp189 + tmp191
tmp193 = tmp192 - tmp9
tmp194 = tmp193 * tmp57
tmp195 = tmp194 >= tmp34
tmp196 = tl.where(tmp195, tmp181, tmp179)
tmp197 = tmp180 * tmp12
tmp198 = tmp196 + tmp197
tmp199 = tmp0 - tmp198
tmp200 = triton_helpers.maximum(tmp199, tmp34)
tmp201 = tmp1 - tmp198
tmp202 = triton_helpers.maximum(tmp201, tmp34)
tmp203 = tmp200 + tmp202
tmp204 = tmp3 - tmp198
tmp205 = triton_helpers.maximum(tmp204, tmp34)
tmp206 = tmp203 + tmp205
tmp207 = tmp5 - tmp198
tmp208 = triton_helpers.maximum(tmp207, tmp34)
tmp209 = tmp206 + tmp208
tmp210 = tmp209 - tmp9
tmp211 = tmp210 * tmp57
tmp212 = tmp211 >= tmp34
tmp213 = tl.where(tmp212, tmp198, tmp196)
tmp214 = tmp197 * tmp12
tmp215 = tmp213 + tmp214
tmp216 = tmp0 - tmp215
tmp217 = triton_helpers.maximum(tmp216, tmp34)
tmp218 = tmp1 - tmp215
tmp219 = triton_helpers.maximum(tmp218, tmp34)
tmp220 = tmp217 + tmp219
tmp221 = tmp3 - tmp215
tmp222 = triton_helpers.maximum(tmp221, tmp34)
tmp223 = tmp220 + tmp222
tmp224 = tmp5 - tmp215
tmp225 = triton_helpers.maximum(tmp224, tmp34)
tmp226 = tmp223 + tmp225
tmp227 = tmp226 - tmp9
tmp228 = tmp227 * tmp57
tmp229 = tmp228 >= tmp34
tmp230 = tl.where(tmp229, tmp215, tmp213)
tmp231 = tmp214 * tmp12
tmp232 = tmp230 + tmp231
tmp233 = tmp0 - tmp232
tmp234 = triton_helpers.maximum(tmp233, tmp34)
tmp235 = tmp1 - tmp232
tmp236 = triton_helpers.maximum(tmp235, tmp34)
tmp237 = tmp234 + tmp236
tmp238 = tmp3 - tmp232
tmp239 = triton_helpers.maximum(tmp238, tmp34)
tmp240 = tmp237 + tmp239
tmp241 = tmp5 - tmp232
tmp242 = triton_helpers.maximum(tmp241, tmp34)
tmp243 = tmp240 + tmp242
tmp244 = tmp243 - tmp9
tmp245 = tmp244 * tmp57
tmp246 = tmp245 >= tmp34
tmp247 = tl.where(tmp246, tmp232, tmp230)
tmp248 = tmp231 * tmp12
tmp249 = tmp247 + tmp248
tmp250 = tmp0 - tmp249
tmp251 = triton_helpers.maximum(tmp250, tmp34)
tmp252 = tmp1 - tmp249
tmp253 = triton_helpers.maximum(tmp252, tmp34)
tmp254 = tmp251 + tmp253
tmp255 = tmp3 - tmp249
tmp256 = triton_helpers.maximum(tmp255, tmp34)
tmp257 = tmp254 + tmp256
tmp258 = tmp5 - tmp249
tmp259 = triton_helpers.maximum(tmp258, tmp34)
tmp260 = tmp257 + tmp259
tmp261 = tmp260 - tmp9
tmp262 = tmp261 * tmp57
tmp263 = tmp262 >= tmp34
tmp264 = tl.where(tmp263, tmp249, tmp247)
tmp265 = tmp248 * tmp12
tmp266 = tmp264 + tmp265
tmp267 = tmp0 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp34)
tmp269 = tmp1 - tmp266
tmp270 = triton_helpers.maximum(tmp269, tmp34)
tmp271 = tmp268 + tmp270
tmp272 = tmp3 - tmp266
tmp273 = triton_helpers.maximum(tmp272, tmp34)
tmp274 = tmp271 + tmp273
tmp275 = tmp5 - tmp266
tmp276 = triton_helpers.maximum(tmp275, tmp34)
tmp277 = tmp274 + tmp276
tmp278 = tmp277 - tmp9
tmp279 = tmp278 * tmp57
tmp280 = tmp279 >= tmp34
tmp281 = tl.where(tmp280, tmp266, tmp264)
tmp282 = tmp265 * tmp12
tmp283 = tmp281 + tmp282
tmp284 = tmp0 - tmp283
tmp285 = triton_helpers.maximum(tmp284, tmp34)
tmp286 = tmp1 - tmp283
tmp287 = triton_helpers.maximum(tmp286, tmp34)
tmp288 = tmp285 + tmp287
tmp289 = tmp3 - tmp283
tmp290 = triton_helpers.maximum(tmp289, tmp34)
tmp291 = tmp288 + tmp290
tmp292 = tmp5 - tmp283
tmp293 = triton_helpers.maximum(tmp292, tmp34)
tmp294 = tmp291 + tmp293
tmp295 = tmp294 - tmp9
tmp296 = tmp295 * tmp57
tmp297 = tmp296 >= tmp34
tmp298 = tl.where(tmp297, tmp283, tmp281)
tmp299 = tmp282 * tmp12
tmp300 = tmp298 + tmp299
tmp301 = tmp0 - tmp300
tmp302 = triton_helpers.maximum(tmp301, tmp34)
tmp303 = tmp1 - tmp300
tmp304 = triton_helpers.maximum(tmp303, tmp34)
tmp305 = tmp302 + tmp304
tmp306 = tmp3 - tmp300
tmp307 = triton_helpers.maximum(tmp306, tmp34)
tmp308 = tmp305 + tmp307
tmp309 = tmp5 - tmp300
tmp310 = triton_helpers.maximum(tmp309, tmp34)
tmp311 = tmp308 + tmp310
tmp312 = tmp311 - tmp9
tmp313 = tmp312 * tmp57
tmp314 = tmp313 >= tmp34
tmp315 = tl.where(tmp314, tmp300, tmp298)
tmp316 = tmp299 * tmp12
tmp317 = tmp315 + tmp316
tmp318 = tmp0 - tmp317
tmp319 = triton_helpers.maximum(tmp318, tmp34)
tmp320 = tmp1 - tmp317
tmp321 = triton_helpers.maximum(tmp320, tmp34)
tmp322 = tmp319 + tmp321
tmp323 = tmp3 - tmp317
tmp324 = triton_helpers.maximum(tmp323, tmp34)
tmp325 = tmp322 + tmp324
tmp326 = tmp5 - tmp317
tmp327 = triton_helpers.maximum(tmp326, tmp34)
tmp328 = tmp325 + tmp327
tmp329 = tmp328 - tmp9
tmp330 = tmp329 * tmp57
tmp331 = tmp330 >= tmp34
tmp332 = tl.where(tmp331, tmp317, tmp315)
tmp333 = tmp316 * tmp12
tmp334 = tmp332 + tmp333
tmp335 = tmp0 - tmp334
tmp336 = triton_helpers.maximum(tmp335, tmp34)
tmp337 = tmp1 - tmp334
tmp338 = triton_helpers.maximum(tmp337, tmp34)
tmp339 = tmp336 + tmp338
tmp340 = tmp3 - tmp334
tmp341 = triton_helpers.maximum(tmp340, tmp34)
tmp342 = tmp339 + tmp341
tmp343 = tmp5 - tmp334
tmp344 = triton_helpers.maximum(tmp343, tmp34)
tmp345 = tmp342 + tmp344
tmp346 = tmp345 - tmp9
tmp347 = tmp346 * tmp57
tmp348 = tmp347 >= tmp34
tmp349 = tl.where(tmp348, tmp334, tmp332)
tmp350 = tmp333 * tmp12
tmp351 = tmp349 + tmp350
tmp352 = tmp0 - tmp351
tmp353 = triton_helpers.maximum(tmp352, tmp34)
tmp354 = tmp1 - tmp351
tmp355 = triton_helpers.maximum(tmp354, tmp34)
tmp356 = tmp353 + tmp355
tmp357 = tmp3 - tmp351
tmp358 = triton_helpers.maximum(tmp357, tmp34)
tmp359 = tmp356 + tmp358
tmp360 = tmp5 - tmp351
tmp361 = triton_helpers.maximum(tmp360, tmp34)
tmp362 = tmp359 + tmp361
tmp363 = tmp362 - tmp9
tmp364 = tmp363 * tmp57
tmp365 = tmp364 >= tmp34
tmp366 = tl.where(tmp365, tmp351, tmp349)
tmp367 = tmp350 * tmp12
tmp368 = tmp366 + tmp367
tmp369 = tmp0 - tmp368
tmp370 = triton_helpers.maximum(tmp369, tmp34)
tmp371 = tmp1 - tmp368
tmp372 = triton_helpers.maximum(tmp371, tmp34)
tmp373 = tmp370 + tmp372
tmp374 = tmp3 - tmp368
tmp375 = triton_helpers.maximum(tmp374, tmp34)
tmp376 = tmp373 + tmp375
tmp377 = tmp5 - tmp368
tmp378 = triton_helpers.maximum(tmp377, tmp34)
tmp379 = tmp376 + tmp378
tmp380 = tmp379 - tmp9
tmp381 = tmp380 * tmp57
tmp382 = tmp381 >= tmp34
tmp383 = tl.where(tmp382, tmp368, tmp366)
tmp384 = tmp367 * tmp12
tmp385 = tmp383 + tmp384
tmp386 = tmp0 - tmp385
tmp387 = triton_helpers.maximum(tmp386, tmp34)
tmp388 = tmp1 - tmp385
tmp389 = triton_helpers.maximum(tmp388, tmp34)
tmp390 = tmp387 + tmp389
tmp391 = tmp3 - tmp385
tmp392 = triton_helpers.maximum(tmp391, tmp34)
tmp393 = tmp390 + tmp392
tmp394 = tmp5 - tmp385
tmp395 = triton_helpers.maximum(tmp394, tmp34)
tmp396 = tmp393 + tmp395
tmp397 = tmp396 - tmp9
tmp398 = tmp397 * tmp57
tmp399 = tmp398 >= tmp34
tmp400 = tl.where(tmp399, tmp385, tmp383)
tmp401 = tmp384 * tmp12
tmp402 = tmp400 + tmp401
tmp403 = tmp0 - tmp402
tmp404 = triton_helpers.maximum(tmp403, tmp34)
tmp405 = tmp1 - tmp402
tmp406 = triton_helpers.maximum(tmp405, tmp34)
tmp407 = tmp404 + tmp406
tmp408 = tmp3 - tmp402
tmp409 = triton_helpers.maximum(tmp408, tmp34)
tmp410 = tmp407 + tmp409
tmp411 = tmp5 - tmp402
tmp412 = triton_helpers.maximum(tmp411, tmp34)
tmp413 = tmp410 + tmp412
tmp414 = tmp413 - tmp9
tmp415 = tmp414 * tmp57
tmp416 = tmp415 >= tmp34
tmp417 = tl.where(tmp416, tmp402, tmp400)
tmp418 = tmp401 * tmp12
tmp419 = tmp417 + tmp418
tmp420 = tmp0 - tmp419
tmp421 = triton_helpers.maximum(tmp420, tmp34)
tmp422 = tmp1 - tmp419
tmp423 = triton_helpers.maximum(tmp422, tmp34)
tmp424 = tmp421 + tmp423
tmp425 = tmp3 - tmp419
tmp426 = triton_helpers.maximum(tmp425, tmp34)
tmp427 = tmp424 + tmp426
tmp428 = tmp5 - tmp419
tmp429 = triton_helpers.maximum(tmp428, tmp34)
tmp430 = tmp427 + tmp429
tmp431 = tmp430 - tmp9
tmp432 = tmp431 * tmp57
tmp433 = tmp432 >= tmp34
tmp434 = tl.where(tmp433, tmp419, tmp417)
tl.store(out_ptr0 + (x0), tmp30, xmask)
tl.store(in_out_ptr16 + (x0), tmp434, xmask)
''', device_str='cuda')
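
# This kernel resumes the same bisection from the tau_lo produced above (in_ptr1)
# and runs iterations 18 through 40, storing the step size dm_18 (tmp30) so the
# next kernel can continue. The long chain of `tmpN = tmpM * tmp12` statements
# simply rebuilds the current step size from scratch: after k halvings it equals
# (tau_hi - tau_lo) * 0.5**k. A hypothetical helper making that explicit
# (illustrative only, not emitted by the compiler):
def _bisection_step_size(tau_hi, tau_lo, k):
    # step size remaining after the k-th bisection halving
    return (tau_hi - tau_lo) * 0.5 ** k
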
# kernel path: runs/run_shard_0/inductor_cache/yz/cyzkhbz7eylqmmsct2eisz274ptkf4xwckrryhdzr5c5xpld426a.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_86, p_m_40], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# p_m_40 => clamp_min_41
# sub_86 => sub_86
# tau_m_40 => add_40
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %sub_86 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_40), kwargs = {})
# %clamp_min_41 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_86, 0), kwargs = {})
triton_poi_fused_add_clamp_div_sub_2 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp1 + tmp26
tmp28 = tmp0 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
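
# This elementwise kernel forms the candidate probabilities for iteration 41:
# it reloads dm_18 (in_ptr2, written by the previous kernel), halves it 23 more
# times to get dm_41 = dm_18 * 0.5**23, and computes
# p_m_40 = clamp(x - (tau_lo_40 + dm_41), 0) per element. A sketch under that
# reading (names are illustrative; relies on the module-level `import torch`):
def _p_m_sketch(x, tau_lo, dm_18, k=23):
    tau_m = tau_lo + dm_18 * 0.5 ** k  # tau_m_40
    return torch.clamp(x - tau_m, min=0.0)  # p_m_40, reduced by the next kernel
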
# kernel path: runs/run_shard_0/inductor_cache/sr/csrhhpesnpea33gn4n5cx2now3swxe3jsyebpywtfcwh4ny67d7d.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sum_42, f_m_40, mul_41, tau_lo_41], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# f_lo => sub_4
# f_m_40 => sub_87
# max_1 => max_1
# mul_41 => mul_41
# sub_3 => sub_3
# sum_1 => sum_1
# sum_42 => sum_42
# tau_lo => sub_1
# tau_lo_41 => where_40
# tau_m_40 => add_40
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_41, [-1]), kwargs = {})
# %sub_87 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_42, 1), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_87, %sub_4), kwargs = {})
# %where_40 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_40, %add_40, %where_39), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (x0), xmask)
tmp33 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp32 + tmp57
tmp59 = tl.where(tmp31, tmp58, tmp32)
tl.store(in_out_ptr0 + (x0), tmp59, xmask)
''', device_str='cuda')
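
# Illustrative eager-mode sketch (not generated code): the where-kernel above
# performs one bisection acceptance test on the threshold tau. With
# f(tau) = sum(clamp(x - tau, 0)) - 1, f_lo is evaluated at the fixed initial
# lower bound tau_lo = max(x) - 1, and f_m comes from the pre-clamped buffer p_m
# produced by the previous kernel; the midpoint is kept when the signs agree.
def _sketch_bisect_update(x, p_m, tau, dm):
    tau_lo0 = x.max(dim=-1).values - 1.0                               # initial lower bound
    f_lo = torch.clamp(x - tau_lo0.unsqueeze(-1), min=0).sum(-1) - 1.0
    f_m = p_m.sum(-1) - 1.0                                            # from the previous kernel
    return torch.where(f_m * f_lo >= 0, tau + dm, tau)                 # accept midpoint on same sign
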
# kernel path: runs/run_shard_0/inductor_cache/ri/cri5g5cwgob26dhdepr7at5q3uqt7g5zn5i44mttvzuefjefarlt.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_88, p_m_41], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# p_m_41 => clamp_min_42
# sub_88 => sub_88
# tau_m_41 => add_41
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
# %sub_88 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_41), kwargs = {})
# %clamp_min_42 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_88, 0), kwargs = {})
triton_poi_fused_add_clamp_div_sub_4 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp1 + tmp27
tmp29 = tmp0 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
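
# Note: the *_sub_4 / *_where_5 pair below repeats the clamp-step / where-update
# pair above for the next unrolled iteration; the only change is one extra * 0.5
# in the dm chain (dm_42, i.e. 24 halvings instead of 23). This pattern recurs
# once per unrolled bisection iteration.
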
# kernel path: runs/run_shard_0/inductor_cache/6v/c6v66pcoz5wwqqj3qdoavhcalhw4ggic7f5bbgnf4wulrawivqmo.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sum_43, f_m_41, mul_42, tau_lo_42], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# f_lo => sub_4
# f_m_41 => sub_89
# max_1 => max_1
# mul_42 => mul_42
# sub_3 => sub_3
# sum_1 => sum_1
# sum_43 => sum_43
# tau_lo => sub_1
# tau_lo_42 => where_41
# tau_m_41 => add_41
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_42, [-1]), kwargs = {})
# %sub_89 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_43, 1), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_89, %sub_4), kwargs = {})
# %where_41 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_41, %add_41, %where_40), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (x0), xmask)
tmp33 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp57 * tmp34
tmp59 = tmp32 + tmp58
tmp60 = tl.where(tmp31, tmp59, tmp32)
tl.store(in_out_ptr0 + (x0), tmp60, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/lg/clgmpssl52or2lky2wemwq23igzgtq7po6uel2vq4hq3f6g72lml.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_90], Original ATen: [aten.div, aten.add, aten.sub]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# sub_90 => sub_90
# tau_m_42 => add_42
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
# %sub_90 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_42), kwargs = {})
triton_poi_fused_add_div_sub_6 = async_compile.triton('triton_poi_fused_add_div_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp1 + tmp28
tmp30 = tmp0 - tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
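
# Illustrative sketch (not generated code): this variant stores the raw
# difference x - tau_m and defers clamp_min(., 0) to the consumer kernel, which
# re-applies it while reducing. Here dm0 has been halved 25 times (dm_19 .. dm_43).
def _sketch_sub_step(x, tau, dm0, halvings=25):
    tau_m = tau + dm0 * (0.5 ** halvings)
    return x - tau_m.unsqueeze(-1)           # clamped later by the consuming where-kernel
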
# kernel path: runs/run_shard_0/inductor_cache/jl/cjlkx2otyy2fduimnbn7ccxqmgq2wq4zlcyndzdk767y2f6xq7z2.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# f_lo => sub_4
# f_m_42 => sub_91
# max_1 => max_1
# mul_43 => mul_43
# p_m_42 => clamp_min_43
# sub_3 => sub_3
# sum_1 => sum_1
# sum_44 => sum_44
# tau_lo => sub_1
# tau_lo_43 => where_42
# tau_m_42 => add_42
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
# %clamp_min_43 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_90, 0), kwargs = {})
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_43, [-1]), kwargs = {})
# %sub_91 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_44, 1), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_91, %sub_4), kwargs = {})
# %where_42 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_42, %add_42, %where_41), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + (x0), xmask)
tmp37 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp36 + tmp63
tmp65 = tl.where(tmp35, tmp64, tmp36)
tl.store(in_out_ptr0 + (x0), tmp65, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/45/c45ntljmhht4tqet2q6qjdfrins55gu6vaasohzaiqpfex6sk7ob.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_92], Original ATen: [aten.div, aten.add, aten.sub]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# dm_44 => div_43
# sub_92 => sub_92
# tau_m_43 => add_43
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
# %sub_92 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_43), kwargs = {})
triton_poi_fused_add_div_sub_8 = async_compile.triton('triton_poi_fused_add_div_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp28 * tmp3
tmp30 = tmp1 + tmp29
tmp31 = tmp0 - tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
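
# Illustrative end-to-end sketch (not generated code): taken together, these
# unrolled kernels implement sparsemax via fixed-iteration bisection on the
# threshold tau. A compact eager version, assuming an initial bracket width of 1
# (the exact initial dm is established by earlier kernels in this file):
def _sketch_sparsemax_bisect(x, n_iter=50):
    tau_lo = x.max(dim=-1, keepdim=True).values - 1.0
    f_lo = torch.clamp(x - tau_lo, min=0).sum(-1, keepdim=True) - 1.0
    dm = torch.ones_like(tau_lo)                        # assumed initial bracket width
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        f_m = torch.clamp(x - tau_m, min=0).sum(-1, keepdim=True) - 1.0
        tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    p = torch.clamp(x - tau_lo, min=0)
    return p / p.sum(-1, keepdim=True)                  # final normalization (sum_52 onward)
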
# kernel path: runs/run_shard_0/inductor_cache/lz/clzcekwx3vzl5uustsl63asl5aemxd4bfzovv6jgg7f5yz6ljzjf.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, p_m_43, sum_45, f_m_43, mul_44, tau_lo_44, dm_45, tau_m_44, sub_94, p_m_44, sum_46, f_m_44, mul_45, tau_lo_45, dm_46, tau_m_45, sub_96, p_m_45, sum_47, f_m_45, mul_46, tau_lo_46, dm_47, tau_m_46, sub_98, p_m_46, sum_48, f_m_46, mul_47, tau_lo_47, dm_48, tau_m_47, sub_100, p_m_47, sum_49, f_m_47, mul_48, tau_lo_48, dm_49, tau_m_48, sub_102, p_m_48, sum_50, f_m_48, tau_lo_49, dm_50, tau_m_49, sub_104, p_m_49, sum_52], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# dm_44 => div_43
# dm_45 => div_44
# dm_46 => div_45
# dm_47 => div_46
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# f_lo => sub_4
# f_m_43 => sub_93
# f_m_44 => sub_95
# f_m_45 => sub_97
# f_m_46 => sub_99
# f_m_47 => sub_101
# f_m_48 => sub_103
# max_1 => max_1
# mul_44 => mul_44
# mul_45 => mul_45
# mul_46 => mul_46
# mul_47 => mul_47
# mul_48 => mul_48
# p_m_43 => clamp_min_44
# p_m_44 => clamp_min_45
# p_m_45 => clamp_min_46
# p_m_46 => clamp_min_47
# p_m_47 => clamp_min_48
# p_m_48 => clamp_min_49
# p_m_49 => clamp_min_50
# sub_100 => sub_100
# sub_102 => sub_102
# sub_104 => sub_104
# sub_3 => sub_3
# sub_94 => sub_94
# sub_96 => sub_96
# sub_98 => sub_98
# sum_1 => sum_1
# sum_45 => sum_45
# sum_46 => sum_46
# sum_47 => sum_47
# sum_48 => sum_48
# sum_49 => sum_49
# sum_50 => sum_50
# sum_52 => sum_52
# tau_lo => sub_1
# tau_lo_44 => where_43
# tau_lo_45 => where_44
# tau_lo_46 => where_45
# tau_lo_47 => where_46
# tau_lo_48 => where_47
# tau_lo_49 => where_48
# tau_m_43 => add_43
# tau_m_44 => add_44
# tau_m_45 => add_45
# tau_m_46 => add_46
# tau_m_47 => add_47
# tau_m_48 => add_48
# tau_m_49 => add_49
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
# %clamp_min_44 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_92, 0), kwargs = {})
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_44, [-1]), kwargs = {})
# %sub_93 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_45, 1), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_93, %sub_4), kwargs = {})
# %where_43 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_43, %add_43, %where_42), kwargs = {})
# %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {})
# %add_44 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_43, %div_44), kwargs = {})
# %sub_94 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_44), kwargs = {})
# %clamp_min_45 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_94, 0), kwargs = {})
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_45, [-1]), kwargs = {})
# %sub_95 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_46, 1), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_95, %sub_4), kwargs = {})
# %where_44 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_44, %add_44, %where_43), kwargs = {})
# %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {})
# %add_45 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_44, %div_45), kwargs = {})
# %sub_96 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_45), kwargs = {})
# %clamp_min_46 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_96, 0), kwargs = {})
# %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_46, [-1]), kwargs = {})
# %sub_97 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_47, 1), kwargs = {})
# %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_97, %sub_4), kwargs = {})
# %where_45 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_45, %add_45, %where_44), kwargs = {})
# %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {})
# %add_46 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_45, %div_46), kwargs = {})
# %sub_98 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_46), kwargs = {})
# %clamp_min_47 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_98, 0), kwargs = {})
# %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_47, [-1]), kwargs = {})
# %sub_99 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_48, 1), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_99, %sub_4), kwargs = {})
# %where_46 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_46, %add_46, %where_45), kwargs = {})
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {})
# %sub_100 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_47), kwargs = {})
# %clamp_min_48 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_100, 0), kwargs = {})
# %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_48, [-1]), kwargs = {})
# %sub_101 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_49, 1), kwargs = {})
# %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_101, %sub_4), kwargs = {})
# %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {})
# %sub_102 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_48), kwargs = {})
# %clamp_min_49 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_102, 0), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_49, [-1]), kwargs = {})
# %sub_103 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_50, 1), kwargs = {})
# %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_104 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_104, 0), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_50, [-1]), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + (x0), xmask)
tmp37 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp63 * tmp38
tmp65 = tmp36 + tmp64
tmp66 = tl.where(tmp35, tmp65, tmp36)
tmp67 = tmp64 * tmp38
tmp68 = tmp66 + tmp67
tmp69 = tmp14 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = tmp15 - tmp68
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = tmp70 + tmp72
tmp74 = tmp17 - tmp68
tmp75 = triton_helpers.maximum(tmp74, tmp1)
tmp76 = tmp73 + tmp75
tmp77 = tmp19 - tmp68
tmp78 = triton_helpers.maximum(tmp77, tmp1)
tmp79 = tmp76 + tmp78
tmp80 = tmp79 - tmp12
tmp81 = tmp80 * tmp33
tmp82 = tmp81 >= tmp1
tmp83 = tl.where(tmp82, tmp68, tmp66)
tmp84 = tmp67 * tmp38
tmp85 = tmp83 + tmp84
tmp86 = tmp14 - tmp85
tmp87 = triton_helpers.maximum(tmp86, tmp1)
tmp88 = tmp15 - tmp85
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = tmp87 + tmp89
tmp91 = tmp17 - tmp85
tmp92 = triton_helpers.maximum(tmp91, tmp1)
tmp93 = tmp90 + tmp92
tmp94 = tmp19 - tmp85
tmp95 = triton_helpers.maximum(tmp94, tmp1)
tmp96 = tmp93 + tmp95
tmp97 = tmp96 - tmp12
tmp98 = tmp97 * tmp33
tmp99 = tmp98 >= tmp1
tmp100 = tl.where(tmp99, tmp85, tmp83)
tmp101 = tmp84 * tmp38
tmp102 = tmp100 + tmp101
tmp103 = tmp14 - tmp102
tmp104 = triton_helpers.maximum(tmp103, tmp1)
tmp105 = tmp15 - tmp102
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = tmp104 + tmp106
tmp108 = tmp17 - tmp102
tmp109 = triton_helpers.maximum(tmp108, tmp1)
tmp110 = tmp107 + tmp109
tmp111 = tmp19 - tmp102
tmp112 = triton_helpers.maximum(tmp111, tmp1)
tmp113 = tmp110 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp33
tmp116 = tmp115 >= tmp1
tmp117 = tl.where(tmp116, tmp102, tmp100)
tmp118 = tmp101 * tmp38
tmp119 = tmp117 + tmp118
tmp120 = tmp14 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp1)
tmp122 = tmp15 - tmp119
tmp123 = triton_helpers.maximum(tmp122, tmp1)
tmp124 = tmp121 + tmp123
tmp125 = tmp17 - tmp119
tmp126 = triton_helpers.maximum(tmp125, tmp1)
tmp127 = tmp124 + tmp126
tmp128 = tmp19 - tmp119
tmp129 = triton_helpers.maximum(tmp128, tmp1)
tmp130 = tmp127 + tmp129
tmp131 = tmp130 - tmp12
tmp132 = tmp131 * tmp33
tmp133 = tmp132 >= tmp1
tmp134 = tl.where(tmp133, tmp119, tmp117)
tmp135 = tmp118 * tmp38
tmp136 = tmp134 + tmp135
tmp137 = tmp14 - tmp136
tmp138 = triton_helpers.maximum(tmp137, tmp1)
tmp139 = tmp15 - tmp136
tmp140 = triton_helpers.maximum(tmp139, tmp1)
tmp141 = tmp138 + tmp140
tmp142 = tmp17 - tmp136
tmp143 = triton_helpers.maximum(tmp142, tmp1)
tmp144 = tmp141 + tmp143
tmp145 = tmp19 - tmp136
tmp146 = triton_helpers.maximum(tmp145, tmp1)
tmp147 = tmp144 + tmp146
tmp148 = tmp147 - tmp12
tmp149 = tmp148 * tmp33
tmp150 = tmp149 >= tmp1
tmp151 = tl.where(tmp150, tmp136, tmp134)
tmp152 = tmp135 * tmp38
tmp153 = tmp151 + tmp152
tmp154 = tmp14 - tmp153
tmp155 = triton_helpers.maximum(tmp154, tmp1)
tmp156 = tmp15 - tmp153
tmp157 = triton_helpers.maximum(tmp156, tmp1)
tmp158 = tmp155 + tmp157
tmp159 = tmp17 - tmp153
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = tmp158 + tmp160
tmp162 = tmp19 - tmp153
tmp163 = triton_helpers.maximum(tmp162, tmp1)
tmp164 = tmp161 + tmp163
tl.store(out_ptr4 + (x0), tmp101, xmask)
tl.store(in_out_ptr2 + (x0), tmp151, xmask)
tl.store(out_ptr7 + (x0), tmp164, xmask)
''', device_str='cuda')
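# Note on the kernel above (an observation, not generated text): it fuses the
# tail of the bisection loop. The long chain of multiplications by 0.5 rebuilds
# the repeatedly halved step dm, each tl.where keeps or advances tau_lo based
# on the sign of f(tau_m) * f_lo, and the final stores write out dm, tau_lo,
# and the per-row sum consumed by the normalization kernel that follows.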
# kernel path: runs/run_shard_0/inductor_cache/qe/cqe4wxdrqmlpdjpsore2xrtnu7imed3hrgbsaycsyw5yzkt5vtf2.py
# Topologically Sorted Source Nodes: [dm_48, dm_49, dm_50, tau_m_49, sub_104, p_m_49, p_m_50], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# p_m_49 => clamp_min_50
# p_m_50 => div_50
# sub_104 => sub_104
# tau_m_49 => add_49
# Graph fragment:
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_104 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_104, 0), kwargs = {})
# %div_50 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_min_50, %unsqueeze_50), kwargs = {})
triton_poi_fused_add_clamp_div_sub_10 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp1 + tmp6
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp12 = tmp10 / tmp11
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
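# Note on the kernel above: it is the final `ensure_sum_one` step. It halves
# the stored dm three more times (dm_48..dm_50), forms tau_m_49 = tau_lo + dm,
# computes p = clamp(X - tau_m, 0), and divides by the per-row sum so each row
# of the output sums to exactly 1 along the last dimension.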
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf40 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, tau_m, sub_6, p_m, sum_2, f_m, sub_3, clamp, sum_1, f_lo, mul_1, tau_lo_1, dm_2, tau_m_1, sub_8, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_10, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_12, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_14, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_16, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_18, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_20, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_22, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_24, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_26, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_28, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_30, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_32, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_34, p_m_14, sum_16, f_m_14, mul_15, tau_lo_15, dm_16, tau_m_15, sub_36, p_m_15, sum_17, f_m_15, mul_16, tau_lo_16, dm_17, tau_m_16, sub_38, p_m_16, sum_18, f_m_16, mul_17, tau_lo_17], Original ATen: [aten.max, aten.sub, aten.div, aten.add, aten.clamp, aten.sum, aten.mul, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0.run(buf41, arg0_1, 64, grid=grid(64), stream=stream0)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf82 = reinterpret_tensor(buf81, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf81 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, sub_3, clamp, sum_1, f_lo, dm_2, dm_3, dm_4, dm_5, dm_6, dm_7, dm_8, dm_9, dm_10, dm_11, dm_12, dm_13, dm_14, dm_15, dm_16, dm_17, dm_18, tau_m_17, sub_40, p_m_17, sum_19, f_m_17, mul_18, tau_lo_18, dm_19, tau_m_18, sub_42, p_m_18, sum_20, f_m_18, mul_19, tau_lo_19, dm_20, tau_m_19, sub_44, p_m_19, sum_21, f_m_19, tau_lo_20, dm_21, tau_m_20, sub_46, p_m_20, sum_22, f_m_20, mul_21, tau_lo_21, dm_22, tau_m_21, sub_48, p_m_21, sum_23, f_m_21, mul_22, tau_lo_22, dm_23, tau_m_22, sub_50, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_52, p_m_23, sum_25, f_m_23, mul_24, tau_lo_24, dm_25, tau_m_24, sub_54, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_56, p_m_25, sum_27, f_m_25, mul_26, tau_lo_26, dm_27, tau_m_26, sub_58, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_60, p_m_27, sum_29, f_m_27, tau_lo_28, dm_29, tau_m_28, sub_62, p_m_28, sum_30, tau_lo_29, dm_30, tau_m_29, sub_64, p_m_29, sum_31, tau_lo_30, dm_31, tau_m_30, sub_66, p_m_30, sum_32, tau_lo_31, dm_32, tau_m_31, sub_68, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_70, p_m_32, sum_34, tau_lo_33, dm_34, tau_m_33, sub_72, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_74, p_m_34, sum_36, tau_lo_35, dm_36, tau_m_35, sub_76, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_78, p_m_36, sum_38, tau_lo_37, dm_38, tau_m_37, sub_80, p_m_37, sum_39, tau_lo_38, dm_39, tau_m_38, sub_82, p_m_38, sum_40, tau_lo_39, dm_40, tau_m_39, sub_84, p_m_39, sum_41, tau_lo_40], Original ATen: [aten.max, aten.sub, aten.div, aten.clamp, aten.sum, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1.run(buf82, arg0_1, buf41, buf42, 64, grid=grid(64), stream=stream0)
buf83 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_86, p_m_40], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_2.run(arg0_1, buf82, buf42, buf83, 256, grid=grid(256), stream=stream0)
buf85 = buf82; del buf82 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sum_42, f_m_40, mul_41, tau_lo_41], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3.run(buf85, buf83, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf86 = buf83; del buf83 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_88, p_m_41], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_4.run(arg0_1, buf85, buf42, buf86, 256, grid=grid(256), stream=stream0)
buf88 = buf85; del buf85 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sum_43, f_m_41, mul_42, tau_lo_42], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5.run(buf88, buf86, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf89 = buf86; del buf86 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_90], Original ATen: [aten.div, aten.add, aten.sub]
triton_poi_fused_add_div_sub_6.run(arg0_1, buf88, buf42, buf89, 256, grid=grid(256), stream=stream0)
buf91 = buf88; del buf88 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7.run(buf91, buf89, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf92 = buf89; del buf89 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_92], Original ATen: [aten.div, aten.add, aten.sub]
triton_poi_fused_add_div_sub_8.run(arg0_1, buf91, buf42, buf92, 256, grid=grid(256), stream=stream0)
buf94 = buf91; del buf91 # reuse
buf100 = buf41; del buf41 # reuse
buf103 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf104 = reinterpret_tensor(buf103, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf103 # reuse
buf105 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, p_m_43, sum_45, f_m_43, mul_44, tau_lo_44, dm_45, tau_m_44, sub_94, p_m_44, sum_46, f_m_44, mul_45, tau_lo_45, dm_46, tau_m_45, sub_96, p_m_45, sum_47, f_m_45, mul_46, tau_lo_46, dm_47, tau_m_46, sub_98, p_m_46, sum_48, f_m_46, mul_47, tau_lo_47, dm_48, tau_m_47, sub_100, p_m_47, sum_49, f_m_47, mul_48, tau_lo_48, dm_49, tau_m_48, sub_102, p_m_48, sum_50, f_m_48, tau_lo_49, dm_50, tau_m_49, sub_104, p_m_49, sum_52], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9.run(buf94, buf104, buf92, arg0_1, buf42, buf100, buf105, 64, grid=grid(64), stream=stream0)
del buf42
del buf94
buf106 = buf92; del buf92 # reuse
# Topologically Sorted Source Nodes: [dm_48, dm_49, dm_50, tau_m_49, sub_104, p_m_49, p_m_50], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_10.run(arg0_1, buf104, buf100, buf105, buf106, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf100
del buf104
del buf105
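    # buf106 holds p_m_50: the sparsemax projection of arg0_1 along the last
    # dimension (same shape and strides as the input), normalized to sum to 1.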
return (buf106, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function
import torch
import torch.nn as nn
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
"""sparsemax: normalizing sparse transform (a la softmax), via bisection.
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If False, the result may
        sum to something close to, but not exactly, 1, which can cause
        downstream problems.
Note: This function does not yet support normalizing along anything except
the last dimension. Please use transposing and views to achieve more
general behavior.
Returns
-------
    P : torch.Tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
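# Example usage (a minimal sketch, not part of the API above): sparsemax_bisect
# is a drop-in, sparser alternative to softmax along `dim`.
#
#     X = torch.randn(2, 5)
#     P = sparsemax_bisect(X, dim=-1)
#     assert (P >= 0).all()
#     assert torch.allclose(P.sum(dim=-1), torch.ones(2))
#     # Unlike softmax, some entries of P are typically exactly zero.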
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50,
                ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
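        # Bisection on the threshold tau: f(tau) = p(X - tau).sum() - 1 is
        # nonincreasing, with f(tau_lo) >= 0 >= f(tau_hi). Each step halves dm
        # and advances tau_lo to the midpoint whenever f there has the same
        # sign as f_lo, so [tau_lo, tau_lo + dm] always brackets the root.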
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
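        # gppr is the diagonal term of the entmax Jacobian on the support
        # (Y > 0); subtracting q * gppr below makes dX sum to zero along
        # ctx.dim, keeping the gradient tangent to the simplex constraint.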
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
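        # Optional gradient w.r.t. alpha: combines the entropy-like term
        # S = Y * log(Y) on the support with the gppr-normalized distribution
        # Y_skewed; only computed when alpha requires grad.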
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
class SparsemaxBisectFunction(EntmaxBisectFunction):
@classmethod
def _gp(cls, x, alpha):
return x
@classmethod
def _gp_inv(cls, y, alpha):
return y
@classmethod
def _p(cls, x, alpha):
return torch.clamp(x, min=0)
@classmethod
    def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
        # Forward the caller's settings instead of hard-coding them; fall
        # back to the default iteration count when n_iter is None (as when
        # constructed via SparsemaxBisect() with no arguments).
        if n_iter is None:
            n_iter = 50
        return super().forward(ctx, X, alpha=2, dim=dim, n_iter=n_iter,
            ensure_sum_one=ensure_sum_one)
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y > 0
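        # For sparsemax (alpha = 2) the diagonal term is just the support
        # indicator: keep dY where Y > 0 and subtract its mean over the
        # support, so dX sums to zero along ctx.dim.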
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None, None
class SparsemaxBisect(nn.Module):
def __init__(self, dim=-1, n_iter=None):
"""sparsemax: normalizing sparse transform (a la softmax) via bisection
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
        n_iter : int, optional
            Number of bisection iterations (50 when None). For float32, 24
            iterations should suffice for machine precision.
"""
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
def forward(self, X):
return sparsemax_bisect(X, dim=self.dim, n_iter=self.n_iter)
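# Example usage (a minimal sketch): the module form on inputs shaped like
# those produced by get_inputs() below.
#
#     layer = SparsemaxBisect(dim=-1)
#     P = layer(torch.rand(4, 4, 4, 4))
#     # P matches the input shape and sums to 1 along the last dimension.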
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
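# Note on the kernels below: each fuses a run of sparsemax bisection
# iterations over the last dimension (size 4). Row maxima seed the initial
# bracket [tau_lo, tau_hi], repeated `* 0.5` chains halve the step dm, and
# each tl.where advances tau_lo whenever f(tau_m) * f_lo >= 0.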
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0(in_out_ptr8,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp9 = 0.25
tmp10 = tmp6 - tmp9
tmp11 = tmp10 - tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp8 + tmp13
tmp15 = tmp0 - tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp1 - tmp14
tmp19 = triton_helpers.maximum(tmp18, tmp16)
tmp20 = tmp17 + tmp19
tmp21 = tmp3 - tmp14
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = tmp20 + tmp22
tmp24 = tmp5 - tmp14
tmp25 = triton_helpers.maximum(tmp24, tmp16)
tmp26 = tmp23 + tmp25
tmp27 = tmp26 - tmp7
tmp28 = tmp0 - tmp8
tmp29 = triton_helpers.maximum(tmp28, tmp16)
tmp30 = tmp1 - tmp8
tmp31 = triton_helpers.maximum(tmp30, tmp16)
tmp32 = tmp29 + tmp31
tmp33 = tmp3 - tmp8
tmp34 = triton_helpers.maximum(tmp33, tmp16)
tmp35 = tmp32 + tmp34
tmp36 = tmp5 - tmp8
tmp37 = triton_helpers.maximum(tmp36, tmp16)
tmp38 = tmp35 + tmp37
tmp39 = tmp38 - tmp7
tmp40 = tmp27 * tmp39
tmp41 = tmp40 >= tmp16
tmp42 = tl.where(tmp41, tmp14, tmp8)
tmp43 = tmp13 * tmp12
tmp44 = tmp42 + tmp43
tmp45 = tmp0 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp16)
tmp47 = tmp1 - tmp44
tmp48 = triton_helpers.maximum(tmp47, tmp16)
tmp49 = tmp46 + tmp48
tmp50 = tmp3 - tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp16)
tmp52 = tmp49 + tmp51
tmp53 = tmp5 - tmp44
tmp54 = triton_helpers.maximum(tmp53, tmp16)
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp7
tmp57 = tmp56 * tmp39
tmp58 = tmp57 >= tmp16
tmp59 = tl.where(tmp58, tmp44, tmp42)
tmp60 = tmp43 * tmp12
tmp61 = tmp59 + tmp60
tmp62 = tmp0 - tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp16)
tmp64 = tmp1 - tmp61
tmp65 = triton_helpers.maximum(tmp64, tmp16)
tmp66 = tmp63 + tmp65
tmp67 = tmp3 - tmp61
tmp68 = triton_helpers.maximum(tmp67, tmp16)
tmp69 = tmp66 + tmp68
tmp70 = tmp5 - tmp61
tmp71 = triton_helpers.maximum(tmp70, tmp16)
tmp72 = tmp69 + tmp71
tmp73 = tmp72 - tmp7
tmp74 = tmp73 * tmp39
tmp75 = tmp74 >= tmp16
tmp76 = tl.where(tmp75, tmp61, tmp59)
tmp77 = tmp60 * tmp12
tmp78 = tmp76 + tmp77
tmp79 = tmp0 - tmp78
tmp80 = triton_helpers.maximum(tmp79, tmp16)
tmp81 = tmp1 - tmp78
tmp82 = triton_helpers.maximum(tmp81, tmp16)
tmp83 = tmp80 + tmp82
tmp84 = tmp3 - tmp78
tmp85 = triton_helpers.maximum(tmp84, tmp16)
tmp86 = tmp83 + tmp85
tmp87 = tmp5 - tmp78
tmp88 = triton_helpers.maximum(tmp87, tmp16)
tmp89 = tmp86 + tmp88
tmp90 = tmp89 - tmp7
tmp91 = tmp90 * tmp39
tmp92 = tmp91 >= tmp16
tmp93 = tl.where(tmp92, tmp78, tmp76)
tmp94 = tmp77 * tmp12
tmp95 = tmp93 + tmp94
tmp96 = tmp0 - tmp95
tmp97 = triton_helpers.maximum(tmp96, tmp16)
tmp98 = tmp1 - tmp95
tmp99 = triton_helpers.maximum(tmp98, tmp16)
tmp100 = tmp97 + tmp99
tmp101 = tmp3 - tmp95
tmp102 = triton_helpers.maximum(tmp101, tmp16)
tmp103 = tmp100 + tmp102
tmp104 = tmp5 - tmp95
tmp105 = triton_helpers.maximum(tmp104, tmp16)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 - tmp7
tmp108 = tmp107 * tmp39
tmp109 = tmp108 >= tmp16
tmp110 = tl.where(tmp109, tmp95, tmp93)
tmp111 = tmp94 * tmp12
tmp112 = tmp110 + tmp111
tmp113 = tmp0 - tmp112
tmp114 = triton_helpers.maximum(tmp113, tmp16)
tmp115 = tmp1 - tmp112
tmp116 = triton_helpers.maximum(tmp115, tmp16)
tmp117 = tmp114 + tmp116
tmp118 = tmp3 - tmp112
tmp119 = triton_helpers.maximum(tmp118, tmp16)
tmp120 = tmp117 + tmp119
tmp121 = tmp5 - tmp112
tmp122 = triton_helpers.maximum(tmp121, tmp16)
tmp123 = tmp120 + tmp122
tmp124 = tmp123 - tmp7
tmp125 = tmp124 * tmp39
tmp126 = tmp125 >= tmp16
tmp127 = tl.where(tmp126, tmp112, tmp110)
tmp128 = tmp111 * tmp12
tmp129 = tmp127 + tmp128
tmp130 = tmp0 - tmp129
tmp131 = triton_helpers.maximum(tmp130, tmp16)
tmp132 = tmp1 - tmp129
tmp133 = triton_helpers.maximum(tmp132, tmp16)
tmp134 = tmp131 + tmp133
tmp135 = tmp3 - tmp129
tmp136 = triton_helpers.maximum(tmp135, tmp16)
tmp137 = tmp134 + tmp136
tmp138 = tmp5 - tmp129
tmp139 = triton_helpers.maximum(tmp138, tmp16)
tmp140 = tmp137 + tmp139
tmp141 = tmp140 - tmp7
tmp142 = tmp141 * tmp39
tmp143 = tmp142 >= tmp16
tmp144 = tl.where(tmp143, tmp129, tmp127)
tmp145 = tmp128 * tmp12
tmp146 = tmp144 + tmp145
tmp147 = tmp0 - tmp146
tmp148 = triton_helpers.maximum(tmp147, tmp16)
tmp149 = tmp1 - tmp146
tmp150 = triton_helpers.maximum(tmp149, tmp16)
tmp151 = tmp148 + tmp150
tmp152 = tmp3 - tmp146
tmp153 = triton_helpers.maximum(tmp152, tmp16)
tmp154 = tmp151 + tmp153
tmp155 = tmp5 - tmp146
tmp156 = triton_helpers.maximum(tmp155, tmp16)
tmp157 = tmp154 + tmp156
tmp158 = tmp157 - tmp7
tmp159 = tmp158 * tmp39
tmp160 = tmp159 >= tmp16
tmp161 = tl.where(tmp160, tmp146, tmp144)
tmp162 = tmp145 * tmp12
tmp163 = tmp161 + tmp162
tmp164 = tmp0 - tmp163
tmp165 = triton_helpers.maximum(tmp164, tmp16)
tmp166 = tmp1 - tmp163
tmp167 = triton_helpers.maximum(tmp166, tmp16)
tmp168 = tmp165 + tmp167
tmp169 = tmp3 - tmp163
tmp170 = triton_helpers.maximum(tmp169, tmp16)
tmp171 = tmp168 + tmp170
tmp172 = tmp5 - tmp163
tmp173 = triton_helpers.maximum(tmp172, tmp16)
tmp174 = tmp171 + tmp173
tmp175 = tmp174 - tmp7
tmp176 = tmp175 * tmp39
tmp177 = tmp176 >= tmp16
tmp178 = tl.where(tmp177, tmp163, tmp161)
tmp179 = tmp162 * tmp12
tmp180 = tmp178 + tmp179
tmp181 = tmp0 - tmp180
tmp182 = triton_helpers.maximum(tmp181, tmp16)
tmp183 = tmp1 - tmp180
tmp184 = triton_helpers.maximum(tmp183, tmp16)
tmp185 = tmp182 + tmp184
tmp186 = tmp3 - tmp180
tmp187 = triton_helpers.maximum(tmp186, tmp16)
tmp188 = tmp185 + tmp187
tmp189 = tmp5 - tmp180
tmp190 = triton_helpers.maximum(tmp189, tmp16)
tmp191 = tmp188 + tmp190
tmp192 = tmp191 - tmp7
tmp193 = tmp192 * tmp39
tmp194 = tmp193 >= tmp16
tmp195 = tl.where(tmp194, tmp180, tmp178)
tmp196 = tmp179 * tmp12
tmp197 = tmp195 + tmp196
tmp198 = tmp0 - tmp197
tmp199 = triton_helpers.maximum(tmp198, tmp16)
tmp200 = tmp1 - tmp197
tmp201 = triton_helpers.maximum(tmp200, tmp16)
tmp202 = tmp199 + tmp201
tmp203 = tmp3 - tmp197
tmp204 = triton_helpers.maximum(tmp203, tmp16)
tmp205 = tmp202 + tmp204
tmp206 = tmp5 - tmp197
tmp207 = triton_helpers.maximum(tmp206, tmp16)
tmp208 = tmp205 + tmp207
tmp209 = tmp208 - tmp7
tmp210 = tmp209 * tmp39
tmp211 = tmp210 >= tmp16
tmp212 = tl.where(tmp211, tmp197, tmp195)
tmp213 = tmp196 * tmp12
tmp214 = tmp212 + tmp213
tmp215 = tmp0 - tmp214
tmp216 = triton_helpers.maximum(tmp215, tmp16)
tmp217 = tmp1 - tmp214
tmp218 = triton_helpers.maximum(tmp217, tmp16)
tmp219 = tmp216 + tmp218
tmp220 = tmp3 - tmp214
tmp221 = triton_helpers.maximum(tmp220, tmp16)
tmp222 = tmp219 + tmp221
tmp223 = tmp5 - tmp214
tmp224 = triton_helpers.maximum(tmp223, tmp16)
tmp225 = tmp222 + tmp224
tmp226 = tmp225 - tmp7
tmp227 = tmp226 * tmp39
tmp228 = tmp227 >= tmp16
tmp229 = tl.where(tmp228, tmp214, tmp212)
tmp230 = tmp213 * tmp12
tmp231 = tmp229 + tmp230
tmp232 = tmp0 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp16)
tmp234 = tmp1 - tmp231
tmp235 = triton_helpers.maximum(tmp234, tmp16)
tmp236 = tmp233 + tmp235
tmp237 = tmp3 - tmp231
tmp238 = triton_helpers.maximum(tmp237, tmp16)
tmp239 = tmp236 + tmp238
tmp240 = tmp5 - tmp231
tmp241 = triton_helpers.maximum(tmp240, tmp16)
tmp242 = tmp239 + tmp241
tmp243 = tmp242 - tmp7
tmp244 = tmp243 * tmp39
tmp245 = tmp244 >= tmp16
tmp246 = tl.where(tmp245, tmp231, tmp229)
tmp247 = tmp230 * tmp12
tmp248 = tmp246 + tmp247
tmp249 = tmp0 - tmp248
tmp250 = triton_helpers.maximum(tmp249, tmp16)
tmp251 = tmp1 - tmp248
tmp252 = triton_helpers.maximum(tmp251, tmp16)
tmp253 = tmp250 + tmp252
tmp254 = tmp3 - tmp248
tmp255 = triton_helpers.maximum(tmp254, tmp16)
tmp256 = tmp253 + tmp255
tmp257 = tmp5 - tmp248
tmp258 = triton_helpers.maximum(tmp257, tmp16)
tmp259 = tmp256 + tmp258
tmp260 = tmp259 - tmp7
tmp261 = tmp260 * tmp39
tmp262 = tmp261 >= tmp16
tmp263 = tl.where(tmp262, tmp248, tmp246)
tmp264 = tmp247 * tmp12
tmp265 = tmp263 + tmp264
tmp266 = tmp0 - tmp265
tmp267 = triton_helpers.maximum(tmp266, tmp16)
tmp268 = tmp1 - tmp265
tmp269 = triton_helpers.maximum(tmp268, tmp16)
tmp270 = tmp267 + tmp269
tmp271 = tmp3 - tmp265
tmp272 = triton_helpers.maximum(tmp271, tmp16)
tmp273 = tmp270 + tmp272
tmp274 = tmp5 - tmp265
tmp275 = triton_helpers.maximum(tmp274, tmp16)
tmp276 = tmp273 + tmp275
tmp277 = tmp276 - tmp7
tmp278 = tmp277 * tmp39
tmp279 = tmp278 >= tmp16
tmp280 = tl.where(tmp279, tmp265, tmp263)
tmp281 = tmp264 * tmp12
tmp282 = tmp280 + tmp281
tmp283 = tmp0 - tmp282
tmp284 = triton_helpers.maximum(tmp283, tmp16)
tmp285 = tmp1 - tmp282
tmp286 = triton_helpers.maximum(tmp285, tmp16)
tmp287 = tmp284 + tmp286
tmp288 = tmp3 - tmp282
tmp289 = triton_helpers.maximum(tmp288, tmp16)
tmp290 = tmp287 + tmp289
tmp291 = tmp5 - tmp282
tmp292 = triton_helpers.maximum(tmp291, tmp16)
tmp293 = tmp290 + tmp292
tmp294 = tmp293 - tmp7
tmp295 = tmp294 * tmp39
tmp296 = tmp295 >= tmp16
tmp297 = tl.where(tmp296, tmp282, tmp280)
tmp298 = tmp281 * tmp12
tmp299 = tmp297 + tmp298
tmp300 = tmp0 - tmp299
tmp301 = triton_helpers.maximum(tmp300, tmp16)
tmp302 = tmp1 - tmp299
tmp303 = triton_helpers.maximum(tmp302, tmp16)
tmp304 = tmp301 + tmp303
tmp305 = tmp3 - tmp299
tmp306 = triton_helpers.maximum(tmp305, tmp16)
tmp307 = tmp304 + tmp306
tmp308 = tmp5 - tmp299
tmp309 = triton_helpers.maximum(tmp308, tmp16)
tmp310 = tmp307 + tmp309
tmp311 = tmp310 - tmp7
tmp312 = tmp311 * tmp39
tmp313 = tmp312 >= tmp16
tmp314 = tl.where(tmp313, tmp299, tmp297)
tl.store(in_out_ptr8 + x0, tmp314, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1(in_out_ptr16,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + x0, xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.25
tmp8 = tmp6 - tmp7
tmp9 = 1.0
tmp10 = tmp6 - tmp9
tmp11 = tmp8 - tmp10
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp12
tmp15 = tmp14 * tmp12
tmp16 = tmp15 * tmp12
tmp17 = tmp16 * tmp12
tmp18 = tmp17 * tmp12
tmp19 = tmp18 * tmp12
tmp20 = tmp19 * tmp12
tmp21 = tmp20 * tmp12
tmp22 = tmp21 * tmp12
tmp23 = tmp22 * tmp12
tmp24 = tmp23 * tmp12
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp12
tmp27 = tmp26 * tmp12
tmp28 = tmp27 * tmp12
tmp29 = tmp28 * tmp12
tmp30 = tmp29 * tmp12
tmp32 = tmp31 + tmp30
tmp33 = tmp0 - tmp32
tmp34 = 0.0
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp1 - tmp32
tmp37 = triton_helpers.maximum(tmp36, tmp34)
tmp38 = tmp35 + tmp37
tmp39 = tmp3 - tmp32
tmp40 = triton_helpers.maximum(tmp39, tmp34)
tmp41 = tmp38 + tmp40
tmp42 = tmp5 - tmp32
tmp43 = triton_helpers.maximum(tmp42, tmp34)
tmp44 = tmp41 + tmp43
tmp45 = tmp44 - tmp9
tmp46 = tmp0 - tmp10
tmp47 = triton_helpers.maximum(tmp46, tmp34)
tmp48 = tmp1 - tmp10
tmp49 = triton_helpers.maximum(tmp48, tmp34)
tmp50 = tmp47 + tmp49
tmp51 = tmp3 - tmp10
tmp52 = triton_helpers.maximum(tmp51, tmp34)
tmp53 = tmp50 + tmp52
tmp54 = tmp5 - tmp10
tmp55 = triton_helpers.maximum(tmp54, tmp34)
tmp56 = tmp53 + tmp55
tmp57 = tmp56 - tmp9
tmp58 = tmp45 * tmp57
tmp59 = tmp58 >= tmp34
tmp60 = tl.where(tmp59, tmp32, tmp31)
tmp61 = tmp30 * tmp12
tmp62 = tmp60 + tmp61
tmp63 = tmp0 - tmp62
tmp64 = triton_helpers.maximum(tmp63, tmp34)
tmp65 = tmp1 - tmp62
tmp66 = triton_helpers.maximum(tmp65, tmp34)
tmp67 = tmp64 + tmp66
tmp68 = tmp3 - tmp62
tmp69 = triton_helpers.maximum(tmp68, tmp34)
tmp70 = tmp67 + tmp69
tmp71 = tmp5 - tmp62
tmp72 = triton_helpers.maximum(tmp71, tmp34)
tmp73 = tmp70 + tmp72
tmp74 = tmp73 - tmp9
tmp75 = tmp74 * tmp57
tmp76 = tmp75 >= tmp34
tmp77 = tl.where(tmp76, tmp62, tmp60)
tmp78 = tmp61 * tmp12
tmp79 = tmp77 + tmp78
tmp80 = tmp0 - tmp79
tmp81 = triton_helpers.maximum(tmp80, tmp34)
tmp82 = tmp1 - tmp79
tmp83 = triton_helpers.maximum(tmp82, tmp34)
tmp84 = tmp81 + tmp83
tmp85 = tmp3 - tmp79
tmp86 = triton_helpers.maximum(tmp85, tmp34)
tmp87 = tmp84 + tmp86
tmp88 = tmp5 - tmp79
tmp89 = triton_helpers.maximum(tmp88, tmp34)
tmp90 = tmp87 + tmp89
tmp91 = tmp90 - tmp9
tmp92 = tmp91 * tmp57
tmp93 = tmp92 >= tmp34
tmp94 = tl.where(tmp93, tmp79, tmp77)
tmp95 = tmp78 * tmp12
tmp96 = tmp94 + tmp95
tmp97 = tmp0 - tmp96
tmp98 = triton_helpers.maximum(tmp97, tmp34)
tmp99 = tmp1 - tmp96
tmp100 = triton_helpers.maximum(tmp99, tmp34)
tmp101 = tmp98 + tmp100
tmp102 = tmp3 - tmp96
tmp103 = triton_helpers.maximum(tmp102, tmp34)
tmp104 = tmp101 + tmp103
tmp105 = tmp5 - tmp96
tmp106 = triton_helpers.maximum(tmp105, tmp34)
tmp107 = tmp104 + tmp106
tmp108 = tmp107 - tmp9
tmp109 = tmp108 * tmp57
tmp110 = tmp109 >= tmp34
tmp111 = tl.where(tmp110, tmp96, tmp94)
tmp112 = tmp95 * tmp12
tmp113 = tmp111 + tmp112
tmp114 = tmp0 - tmp113
tmp115 = triton_helpers.maximum(tmp114, tmp34)
tmp116 = tmp1 - tmp113
tmp117 = triton_helpers.maximum(tmp116, tmp34)
tmp118 = tmp115 + tmp117
tmp119 = tmp3 - tmp113
tmp120 = triton_helpers.maximum(tmp119, tmp34)
tmp121 = tmp118 + tmp120
tmp122 = tmp5 - tmp113
tmp123 = triton_helpers.maximum(tmp122, tmp34)
tmp124 = tmp121 + tmp123
tmp125 = tmp124 - tmp9
tmp126 = tmp125 * tmp57
tmp127 = tmp126 >= tmp34
tmp128 = tl.where(tmp127, tmp113, tmp111)
tmp129 = tmp112 * tmp12
tmp130 = tmp128 + tmp129
tmp131 = tmp0 - tmp130
tmp132 = triton_helpers.maximum(tmp131, tmp34)
tmp133 = tmp1 - tmp130
tmp134 = triton_helpers.maximum(tmp133, tmp34)
tmp135 = tmp132 + tmp134
tmp136 = tmp3 - tmp130
tmp137 = triton_helpers.maximum(tmp136, tmp34)
tmp138 = tmp135 + tmp137
tmp139 = tmp5 - tmp130
tmp140 = triton_helpers.maximum(tmp139, tmp34)
tmp141 = tmp138 + tmp140
tmp142 = tmp141 - tmp9
tmp143 = tmp142 * tmp57
tmp144 = tmp143 >= tmp34
tmp145 = tl.where(tmp144, tmp130, tmp128)
tmp146 = tmp129 * tmp12
tmp147 = tmp145 + tmp146
tmp148 = tmp0 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp34)
tmp150 = tmp1 - tmp147
tmp151 = triton_helpers.maximum(tmp150, tmp34)
tmp152 = tmp149 + tmp151
tmp153 = tmp3 - tmp147
tmp154 = triton_helpers.maximum(tmp153, tmp34)
tmp155 = tmp152 + tmp154
tmp156 = tmp5 - tmp147
tmp157 = triton_helpers.maximum(tmp156, tmp34)
tmp158 = tmp155 + tmp157
tmp159 = tmp158 - tmp9
tmp160 = tmp159 * tmp57
tmp161 = tmp160 >= tmp34
tmp162 = tl.where(tmp161, tmp147, tmp145)
tmp163 = tmp146 * tmp12
tmp164 = tmp162 + tmp163
tmp165 = tmp0 - tmp164
tmp166 = triton_helpers.maximum(tmp165, tmp34)
tmp167 = tmp1 - tmp164
tmp168 = triton_helpers.maximum(tmp167, tmp34)
tmp169 = tmp166 + tmp168
tmp170 = tmp3 - tmp164
tmp171 = triton_helpers.maximum(tmp170, tmp34)
tmp172 = tmp169 + tmp171
tmp173 = tmp5 - tmp164
tmp174 = triton_helpers.maximum(tmp173, tmp34)
tmp175 = tmp172 + tmp174
tmp176 = tmp175 - tmp9
tmp177 = tmp176 * tmp57
tmp178 = tmp177 >= tmp34
tmp179 = tl.where(tmp178, tmp164, tmp162)
tmp180 = tmp163 * tmp12
tmp181 = tmp179 + tmp180
tmp182 = tmp0 - tmp181
tmp183 = triton_helpers.maximum(tmp182, tmp34)
tmp184 = tmp1 - tmp181
tmp185 = triton_helpers.maximum(tmp184, tmp34)
tmp186 = tmp183 + tmp185
tmp187 = tmp3 - tmp181
tmp188 = triton_helpers.maximum(tmp187, tmp34)
tmp189 = tmp186 + tmp188
tmp190 = tmp5 - tmp181
tmp191 = triton_helpers.maximum(tmp190, tmp34)
tmp192 = tmp189 + tmp191
tmp193 = tmp192 - tmp9
tmp194 = tmp193 * tmp57
tmp195 = tmp194 >= tmp34
tmp196 = tl.where(tmp195, tmp181, tmp179)
tmp197 = tmp180 * tmp12
tmp198 = tmp196 + tmp197
tmp199 = tmp0 - tmp198
tmp200 = triton_helpers.maximum(tmp199, tmp34)
tmp201 = tmp1 - tmp198
tmp202 = triton_helpers.maximum(tmp201, tmp34)
tmp203 = tmp200 + tmp202
tmp204 = tmp3 - tmp198
tmp205 = triton_helpers.maximum(tmp204, tmp34)
tmp206 = tmp203 + tmp205
tmp207 = tmp5 - tmp198
tmp208 = triton_helpers.maximum(tmp207, tmp34)
tmp209 = tmp206 + tmp208
tmp210 = tmp209 - tmp9
tmp211 = tmp210 * tmp57
tmp212 = tmp211 >= tmp34
tmp213 = tl.where(tmp212, tmp198, tmp196)
tmp214 = tmp197 * tmp12
tmp215 = tmp213 + tmp214
tmp216 = tmp0 - tmp215
tmp217 = triton_helpers.maximum(tmp216, tmp34)
tmp218 = tmp1 - tmp215
tmp219 = triton_helpers.maximum(tmp218, tmp34)
tmp220 = tmp217 + tmp219
tmp221 = tmp3 - tmp215
tmp222 = triton_helpers.maximum(tmp221, tmp34)
tmp223 = tmp220 + tmp222
tmp224 = tmp5 - tmp215
tmp225 = triton_helpers.maximum(tmp224, tmp34)
tmp226 = tmp223 + tmp225
tmp227 = tmp226 - tmp9
tmp228 = tmp227 * tmp57
tmp229 = tmp228 >= tmp34
tmp230 = tl.where(tmp229, tmp215, tmp213)
tmp231 = tmp214 * tmp12
tmp232 = tmp230 + tmp231
tmp233 = tmp0 - tmp232
tmp234 = triton_helpers.maximum(tmp233, tmp34)
tmp235 = tmp1 - tmp232
tmp236 = triton_helpers.maximum(tmp235, tmp34)
tmp237 = tmp234 + tmp236
tmp238 = tmp3 - tmp232
tmp239 = triton_helpers.maximum(tmp238, tmp34)
tmp240 = tmp237 + tmp239
tmp241 = tmp5 - tmp232
tmp242 = triton_helpers.maximum(tmp241, tmp34)
tmp243 = tmp240 + tmp242
tmp244 = tmp243 - tmp9
tmp245 = tmp244 * tmp57
tmp246 = tmp245 >= tmp34
tmp247 = tl.where(tmp246, tmp232, tmp230)
tmp248 = tmp231 * tmp12
tmp249 = tmp247 + tmp248
tmp250 = tmp0 - tmp249
tmp251 = triton_helpers.maximum(tmp250, tmp34)
tmp252 = tmp1 - tmp249
tmp253 = triton_helpers.maximum(tmp252, tmp34)
tmp254 = tmp251 + tmp253
tmp255 = tmp3 - tmp249
tmp256 = triton_helpers.maximum(tmp255, tmp34)
tmp257 = tmp254 + tmp256
tmp258 = tmp5 - tmp249
tmp259 = triton_helpers.maximum(tmp258, tmp34)
tmp260 = tmp257 + tmp259
tmp261 = tmp260 - tmp9
tmp262 = tmp261 * tmp57
tmp263 = tmp262 >= tmp34
tmp264 = tl.where(tmp263, tmp249, tmp247)
tmp265 = tmp248 * tmp12
tmp266 = tmp264 + tmp265
tmp267 = tmp0 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp34)
tmp269 = tmp1 - tmp266
tmp270 = triton_helpers.maximum(tmp269, tmp34)
tmp271 = tmp268 + tmp270
tmp272 = tmp3 - tmp266
tmp273 = triton_helpers.maximum(tmp272, tmp34)
tmp274 = tmp271 + tmp273
tmp275 = tmp5 - tmp266
tmp276 = triton_helpers.maximum(tmp275, tmp34)
tmp277 = tmp274 + tmp276
tmp278 = tmp277 - tmp9
tmp279 = tmp278 * tmp57
tmp280 = tmp279 >= tmp34
tmp281 = tl.where(tmp280, tmp266, tmp264)
tmp282 = tmp265 * tmp12
tmp283 = tmp281 + tmp282
tmp284 = tmp0 - tmp283
tmp285 = triton_helpers.maximum(tmp284, tmp34)
tmp286 = tmp1 - tmp283
tmp287 = triton_helpers.maximum(tmp286, tmp34)
tmp288 = tmp285 + tmp287
tmp289 = tmp3 - tmp283
tmp290 = triton_helpers.maximum(tmp289, tmp34)
tmp291 = tmp288 + tmp290
tmp292 = tmp5 - tmp283
tmp293 = triton_helpers.maximum(tmp292, tmp34)
tmp294 = tmp291 + tmp293
tmp295 = tmp294 - tmp9
tmp296 = tmp295 * tmp57
tmp297 = tmp296 >= tmp34
tmp298 = tl.where(tmp297, tmp283, tmp281)
tmp299 = tmp282 * tmp12
tmp300 = tmp298 + tmp299
tmp301 = tmp0 - tmp300
tmp302 = triton_helpers.maximum(tmp301, tmp34)
tmp303 = tmp1 - tmp300
tmp304 = triton_helpers.maximum(tmp303, tmp34)
tmp305 = tmp302 + tmp304
tmp306 = tmp3 - tmp300
tmp307 = triton_helpers.maximum(tmp306, tmp34)
tmp308 = tmp305 + tmp307
tmp309 = tmp5 - tmp300
tmp310 = triton_helpers.maximum(tmp309, tmp34)
tmp311 = tmp308 + tmp310
tmp312 = tmp311 - tmp9
tmp313 = tmp312 * tmp57
tmp314 = tmp313 >= tmp34
tmp315 = tl.where(tmp314, tmp300, tmp298)
tmp316 = tmp299 * tmp12
tmp317 = tmp315 + tmp316
tmp318 = tmp0 - tmp317
tmp319 = triton_helpers.maximum(tmp318, tmp34)
tmp320 = tmp1 - tmp317
tmp321 = triton_helpers.maximum(tmp320, tmp34)
tmp322 = tmp319 + tmp321
tmp323 = tmp3 - tmp317
tmp324 = triton_helpers.maximum(tmp323, tmp34)
tmp325 = tmp322 + tmp324
tmp326 = tmp5 - tmp317
tmp327 = triton_helpers.maximum(tmp326, tmp34)
tmp328 = tmp325 + tmp327
tmp329 = tmp328 - tmp9
tmp330 = tmp329 * tmp57
tmp331 = tmp330 >= tmp34
tmp332 = tl.where(tmp331, tmp317, tmp315)
tmp333 = tmp316 * tmp12
tmp334 = tmp332 + tmp333
tmp335 = tmp0 - tmp334
tmp336 = triton_helpers.maximum(tmp335, tmp34)
tmp337 = tmp1 - tmp334
tmp338 = triton_helpers.maximum(tmp337, tmp34)
tmp339 = tmp336 + tmp338
tmp340 = tmp3 - tmp334
tmp341 = triton_helpers.maximum(tmp340, tmp34)
tmp342 = tmp339 + tmp341
tmp343 = tmp5 - tmp334
tmp344 = triton_helpers.maximum(tmp343, tmp34)
tmp345 = tmp342 + tmp344
tmp346 = tmp345 - tmp9
tmp347 = tmp346 * tmp57
tmp348 = tmp347 >= tmp34
tmp349 = tl.where(tmp348, tmp334, tmp332)
tmp350 = tmp333 * tmp12
tmp351 = tmp349 + tmp350
tmp352 = tmp0 - tmp351
tmp353 = triton_helpers.maximum(tmp352, tmp34)
tmp354 = tmp1 - tmp351
tmp355 = triton_helpers.maximum(tmp354, tmp34)
tmp356 = tmp353 + tmp355
tmp357 = tmp3 - tmp351
tmp358 = triton_helpers.maximum(tmp357, tmp34)
tmp359 = tmp356 + tmp358
tmp360 = tmp5 - tmp351
tmp361 = triton_helpers.maximum(tmp360, tmp34)
tmp362 = tmp359 + tmp361
tmp363 = tmp362 - tmp9
tmp364 = tmp363 * tmp57
tmp365 = tmp364 >= tmp34
tmp366 = tl.where(tmp365, tmp351, tmp349)
tmp367 = tmp350 * tmp12
tmp368 = tmp366 + tmp367
tmp369 = tmp0 - tmp368
tmp370 = triton_helpers.maximum(tmp369, tmp34)
tmp371 = tmp1 - tmp368
tmp372 = triton_helpers.maximum(tmp371, tmp34)
tmp373 = tmp370 + tmp372
tmp374 = tmp3 - tmp368
tmp375 = triton_helpers.maximum(tmp374, tmp34)
tmp376 = tmp373 + tmp375
tmp377 = tmp5 - tmp368
tmp378 = triton_helpers.maximum(tmp377, tmp34)
tmp379 = tmp376 + tmp378
tmp380 = tmp379 - tmp9
tmp381 = tmp380 * tmp57
tmp382 = tmp381 >= tmp34
tmp383 = tl.where(tmp382, tmp368, tmp366)
tmp384 = tmp367 * tmp12
tmp385 = tmp383 + tmp384
tmp386 = tmp0 - tmp385
tmp387 = triton_helpers.maximum(tmp386, tmp34)
tmp388 = tmp1 - tmp385
tmp389 = triton_helpers.maximum(tmp388, tmp34)
tmp390 = tmp387 + tmp389
tmp391 = tmp3 - tmp385
tmp392 = triton_helpers.maximum(tmp391, tmp34)
tmp393 = tmp390 + tmp392
tmp394 = tmp5 - tmp385
tmp395 = triton_helpers.maximum(tmp394, tmp34)
tmp396 = tmp393 + tmp395
tmp397 = tmp396 - tmp9
tmp398 = tmp397 * tmp57
tmp399 = tmp398 >= tmp34
tmp400 = tl.where(tmp399, tmp385, tmp383)
tmp401 = tmp384 * tmp12
tmp402 = tmp400 + tmp401
tmp403 = tmp0 - tmp402
tmp404 = triton_helpers.maximum(tmp403, tmp34)
tmp405 = tmp1 - tmp402
tmp406 = triton_helpers.maximum(tmp405, tmp34)
tmp407 = tmp404 + tmp406
tmp408 = tmp3 - tmp402
tmp409 = triton_helpers.maximum(tmp408, tmp34)
tmp410 = tmp407 + tmp409
tmp411 = tmp5 - tmp402
tmp412 = triton_helpers.maximum(tmp411, tmp34)
tmp413 = tmp410 + tmp412
tmp414 = tmp413 - tmp9
tmp415 = tmp414 * tmp57
tmp416 = tmp415 >= tmp34
tmp417 = tl.where(tmp416, tmp402, tmp400)
tmp418 = tmp401 * tmp12
tmp419 = tmp417 + tmp418
tmp420 = tmp0 - tmp419
tmp421 = triton_helpers.maximum(tmp420, tmp34)
tmp422 = tmp1 - tmp419
tmp423 = triton_helpers.maximum(tmp422, tmp34)
tmp424 = tmp421 + tmp423
tmp425 = tmp3 - tmp419
tmp426 = triton_helpers.maximum(tmp425, tmp34)
tmp427 = tmp424 + tmp426
tmp428 = tmp5 - tmp419
tmp429 = triton_helpers.maximum(tmp428, tmp34)
tmp430 = tmp427 + tmp429
tmp431 = tmp430 - tmp9
tmp432 = tmp431 * tmp57
tmp433 = tmp432 >= tmp34
tmp434 = tl.where(tmp433, tmp419, tmp417)
tl.store(out_ptr0 + x0, tmp30, xmask)
tl.store(in_out_ptr16 + x0, tmp434, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp1 + tmp26
tmp28 = tmp0 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_out_ptr0 + x0, xmask)
tmp33 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp32 + tmp57
tmp59 = tl.where(tmp31, tmp58, tmp32)
tl.store(in_out_ptr0 + x0, tmp59, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp1 + tmp27
tmp29 = tmp0 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_out_ptr0 + x0, xmask)
tmp33 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp57 * tmp34
tmp59 = tmp32 + tmp58
tmp60 = tl.where(tmp31, tmp59, tmp32)
tl.store(in_out_ptr0 + x0, tmp60, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp1 + tmp28
tmp30 = tmp0 - tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_out_ptr0 + x0, xmask)
tmp37 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp36 + tmp63
tmp65 = tl.where(tmp35, tmp64, tmp36)
tl.store(in_out_ptr0 + x0, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp28 * tmp3
tmp30 = tmp1 + tmp29
tmp31 = tmp0 - tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9(in_out_ptr0,
in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr7, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + x0, xmask)
tmp37 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp63 * tmp38
tmp65 = tmp36 + tmp64
tmp66 = tl.where(tmp35, tmp65, tmp36)
tmp67 = tmp64 * tmp38
tmp68 = tmp66 + tmp67
tmp69 = tmp14 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = tmp15 - tmp68
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = tmp70 + tmp72
tmp74 = tmp17 - tmp68
tmp75 = triton_helpers.maximum(tmp74, tmp1)
tmp76 = tmp73 + tmp75
tmp77 = tmp19 - tmp68
tmp78 = triton_helpers.maximum(tmp77, tmp1)
tmp79 = tmp76 + tmp78
tmp80 = tmp79 - tmp12
tmp81 = tmp80 * tmp33
tmp82 = tmp81 >= tmp1
tmp83 = tl.where(tmp82, tmp68, tmp66)
tmp84 = tmp67 * tmp38
tmp85 = tmp83 + tmp84
tmp86 = tmp14 - tmp85
tmp87 = triton_helpers.maximum(tmp86, tmp1)
tmp88 = tmp15 - tmp85
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = tmp87 + tmp89
tmp91 = tmp17 - tmp85
tmp92 = triton_helpers.maximum(tmp91, tmp1)
tmp93 = tmp90 + tmp92
tmp94 = tmp19 - tmp85
tmp95 = triton_helpers.maximum(tmp94, tmp1)
tmp96 = tmp93 + tmp95
tmp97 = tmp96 - tmp12
tmp98 = tmp97 * tmp33
tmp99 = tmp98 >= tmp1
tmp100 = tl.where(tmp99, tmp85, tmp83)
tmp101 = tmp84 * tmp38
tmp102 = tmp100 + tmp101
tmp103 = tmp14 - tmp102
tmp104 = triton_helpers.maximum(tmp103, tmp1)
tmp105 = tmp15 - tmp102
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = tmp104 + tmp106
tmp108 = tmp17 - tmp102
tmp109 = triton_helpers.maximum(tmp108, tmp1)
tmp110 = tmp107 + tmp109
tmp111 = tmp19 - tmp102
tmp112 = triton_helpers.maximum(tmp111, tmp1)
tmp113 = tmp110 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp33
tmp116 = tmp115 >= tmp1
tmp117 = tl.where(tmp116, tmp102, tmp100)
tmp118 = tmp101 * tmp38
tmp119 = tmp117 + tmp118
tmp120 = tmp14 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp1)
tmp122 = tmp15 - tmp119
tmp123 = triton_helpers.maximum(tmp122, tmp1)
tmp124 = tmp121 + tmp123
tmp125 = tmp17 - tmp119
tmp126 = triton_helpers.maximum(tmp125, tmp1)
tmp127 = tmp124 + tmp126
tmp128 = tmp19 - tmp119
tmp129 = triton_helpers.maximum(tmp128, tmp1)
tmp130 = tmp127 + tmp129
tmp131 = tmp130 - tmp12
tmp132 = tmp131 * tmp33
tmp133 = tmp132 >= tmp1
tmp134 = tl.where(tmp133, tmp119, tmp117)
tmp135 = tmp118 * tmp38
tmp136 = tmp134 + tmp135
tmp137 = tmp14 - tmp136
tmp138 = triton_helpers.maximum(tmp137, tmp1)
tmp139 = tmp15 - tmp136
tmp140 = triton_helpers.maximum(tmp139, tmp1)
tmp141 = tmp138 + tmp140
tmp142 = tmp17 - tmp136
tmp143 = triton_helpers.maximum(tmp142, tmp1)
tmp144 = tmp141 + tmp143
tmp145 = tmp19 - tmp136
tmp146 = triton_helpers.maximum(tmp145, tmp1)
tmp147 = tmp144 + tmp146
tmp148 = tmp147 - tmp12
tmp149 = tmp148 * tmp33
tmp150 = tmp149 >= tmp1
tmp151 = tl.where(tmp150, tmp136, tmp134)
tmp152 = tmp135 * tmp38
tmp153 = tmp151 + tmp152
tmp154 = tmp14 - tmp153
tmp155 = triton_helpers.maximum(tmp154, tmp1)
tmp156 = tmp15 - tmp153
tmp157 = triton_helpers.maximum(tmp156, tmp1)
tmp158 = tmp155 + tmp157
tmp159 = tmp17 - tmp153
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = tmp158 + tmp160
tmp162 = tmp19 - tmp153
tmp163 = triton_helpers.maximum(tmp162, tmp1)
tmp164 = tmp161 + tmp163
tl.store(out_ptr4 + x0, tmp101, xmask)
tl.store(in_out_ptr2 + x0, tmp151, xmask)
tl.store(out_ptr7 + x0, tmp164, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp1 + tmp6
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp12 = tmp10 / tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
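# Note (editor's addition): the kernels above are Inductor's unrolled
# compilation of the bisection loop in sparsemax_bisect below. Each long
# chain of repeated `* 0.5` multiplications materializes dm / 2**k for
# bisection step k, and each `tl.where(cond, tau + dm, tau)` mirrors the
# sign test `f_m * f_lo >= 0` that advances the lower bound tau_lo.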
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf40
get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0[grid(64)](buf41,
arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf82 = reinterpret_tensor(buf81, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf81
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1[grid(64)](buf82,
arg0_1, buf41, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf83 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_sub_2[grid(256)](arg0_1, buf82,
buf42, buf83, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf85 = buf82
del buf82
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3[grid(64)](buf85,
buf83, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf86 = buf83
del buf83
triton_poi_fused_add_clamp_div_sub_4[grid(256)](arg0_1, buf85,
buf42, buf86, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf88 = buf85
del buf85
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5[grid(64)](buf88,
buf86, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf89 = buf86
del buf86
triton_poi_fused_add_div_sub_6[grid(256)](arg0_1, buf88, buf42,
buf89, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf91 = buf88
del buf88
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7[grid(64)](buf91,
buf89, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf92 = buf89
del buf89
triton_poi_fused_add_div_sub_8[grid(256)](arg0_1, buf91, buf42,
buf92, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf94 = buf91
del buf91
buf100 = buf41
del buf41
buf103 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf104 = reinterpret_tensor(buf103, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf103
buf105 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9[grid(64)](buf94,
buf104, buf92, arg0_1, buf42, buf100, buf105, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf42
del buf94
buf106 = buf92
del buf92
triton_poi_fused_add_clamp_div_sub_10[grid(256)](arg0_1, buf104,
buf100, buf105, buf106, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf100
del buf104
del buf105
return buf106,
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
"""sparsemax: normalizing sparse transform (a la softmax), via bisection.
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If False, the result might
        sum to a value close to, but not exactly, 1, which can cause
        problems downstream.
Note: This function does not yet support normalizing along anything except
the last dimension. Please use transposing and views to achieve more
general behavior.
Returns
-------
    P : torch tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 for every
        slice along dim.
"""
return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
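# Hedged usage sketch (editor's addition, not part of the original source):
# the input values are illustrative only, but the asserted invariants follow
# directly from the docstring above -- the output is nonnegative, sums to 1
# along dim, and is typically sparse (contains exact zeros).
def _sparsemax_bisect_demo():
    logits = torch.tensor([[1.5, 0.1, -2.0, 0.2]])
    probs = sparsemax_bisect(logits, dim=-1)
    assert torch.all(probs >= 0)  # projection onto the simplex is nonnegative
    assert torch.allclose(probs.sum(dim=-1), torch.tensor(1.0))
    return probs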
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
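# Hedged sketch (editor's addition): EntmaxBisectFunction runs the same
# bisection for a general alpha; alpha=1.5 gives 1.5-entmax, while alpha=2
# recovers sparsemax via the subclass below. Shapes and values here are
# illustrative only.
def _entmax_bisect_demo():
    X = torch.randn(2, 5, dtype=torch.double)
    P = EntmaxBisectFunction.apply(X, 1.5, -1, 50, True)
    assert torch.allclose(P.sum(dim=-1), torch.ones(2, dtype=torch.double))
    return P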
class SparsemaxBisectFunction(EntmaxBisectFunction):
@classmethod
def _gp(cls, x, alpha):
return x
@classmethod
def _gp_inv(cls, y, alpha):
return y
@classmethod
def _p(cls, x, alpha):
return torch.clamp(x, min=0)
@classmethod
def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
        return super().forward(ctx, X, alpha=2, dim=dim, n_iter=n_iter,
            ensure_sum_one=ensure_sum_one)
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y > 0
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None, None
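# Hedged numerical check (editor's addition, illustrative only): the custom
# backward masks by the support (Y > 0), so it can be compared against finite
# differences with torch.autograd.gradcheck in double precision. sparsemax is
# piecewise linear, so this check may be sensitive for inputs that land
# exactly on a support-change boundary.
def _sparsemax_gradcheck_demo():
    X = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
    return torch.autograd.gradcheck(
        lambda t: SparsemaxBisectFunction.apply(t, -1, 50, True), (X,))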
class SparsemaxBisectNew(nn.Module):
def __init__(self, dim=-1, n_iter=None):
"""sparsemax: normalizing sparse transform (a la softmax) via bisection
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
"""
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
 | antoniogois/entmax | SparsemaxBisect | false | 14,957 | ["MIT"] | 298 | 7ff3fa6b09ee53e04514173aacae9de90c95ca75 | https://github.com/antoniogois/entmax/tree/7ff3fa6b09ee53e04514173aacae9de90c95ca75 |
FactorScalar | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yc/cycelhrr7d5b627wwjisqr4ziuy5dylqqlk7lgr32kypwamk5f6s.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FactorScalar(nn.Module):
def __init__(self, initial_value=1.0, **kwargs):
super().__init__()
self.factor = nn.Parameter(torch.tensor(initial_value))
def on_task_end(self):
pass
def on_epoch_end(self):
pass
def forward(self, inputs):
return self.factor * inputs
def __mul__(self, other):
return self.forward(other)
def __rmul__(self, other):
return self.forward(other)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
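# Hedged usage sketch (editor's addition): FactorScalar wraps one learned
# scalar and rescales whatever it multiplies; __mul__/__rmul__ let it sit on
# either side of `*`. The values below are illustrative only.
def _factor_scalar_demo():
    scale = FactorScalar(initial_value=2.0)
    x = torch.ones(3)
    assert torch.allclose(scale(x), 2.0 * x)
    assert torch.allclose(scale * x, x * scale)  # both dispatch to forward()
    return scale.factor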
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class FactorScalarNew(nn.Module):
def __init__(self, initial_value=1.0, **kwargs):
super().__init__()
self.factor = nn.Parameter(torch.tensor(initial_value))
def on_task_end(self):
pass
def on_epoch_end(self):
pass
def __mul__(self, other):
return self.forward(other)
def __rmul__(self, other):
return self.forward(other)
def forward(self, input_0):
primals_1 = self.factor
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
 | billpsomas/incremental_learning.pytorch | FactorScalar | false | 14,958 | ["MIT"] | 277 | a401a6609fc61c74698739cf937c0ece1c10913f | https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f |
InvertedFactorScalar | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ms/cms4vm56uqqf3qeffln4ey2idkisk4un63l4jdx7vlqh77sgbimy.py
# Topologically Sorted Source Nodes: [add, truediv, mul], Original ATen: [aten.add, aten.reciprocal, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul_1
# truediv => mul, reciprocal
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, 1e-07), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
triton_poi_fused_add_mul_reciprocal_0 = async_compile.triton('triton_poi_fused_add_mul_reciprocal_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_reciprocal_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = 1e-07
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp4 / tmp3
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, truediv, mul], Original ATen: [aten.add, aten.reciprocal, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_reciprocal_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class InvertedFactorScalar(nn.Module):
def __init__(self, initial_value=1.0, **kwargs):
super().__init__()
self._factor = nn.Parameter(torch.tensor(initial_value))
@property
def factor(self):
return 1 / (self._factor + 1e-07)
def on_task_end(self):
pass
def on_epoch_end(self):
pass
def forward(self, inputs):
return self.factor * inputs
def __mul__(self, other):
return self.forward(other)
def __rmul__(self, other):
return self.forward(other)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
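# Hedged usage sketch (editor's addition): the parameter is stored as _factor
# but applied through its stabilized reciprocal, so initial_value=4.0 scales
# inputs by roughly 1/4. Values are illustrative only.
def _inverted_factor_scalar_demo():
    m = InvertedFactorScalar(initial_value=4.0)
    y = m(torch.ones(2))
    assert torch.allclose(y, torch.full((2,), 0.25), atol=1e-05)
    return y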
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_reciprocal_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp2 = 1e-07
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp4 / tmp3
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_reciprocal_0[grid(256)](primals_1,
primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class InvertedFactorScalarNew(nn.Module):
def __init__(self, initial_value=1.0, **kwargs):
super().__init__()
self._factor = nn.Parameter(torch.tensor(initial_value))
@property
def factor(self):
return 1 / (self._factor + 1e-07)
def on_task_end(self):
pass
def on_epoch_end(self):
pass
def __mul__(self, other):
return self.forward(other)
def __rmul__(self, other):
return self.forward(other)
def forward(self, input_0):
primals_1 = self._factor
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
 | billpsomas/incremental_learning.pytorch | InvertedFactorScalar | false | 14,959 | ["MIT"] | 277 | a401a6609fc61c74698739cf937c0ece1c10913f | https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f |
MinibatchStdLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3x/c3x6exjdpik3ljv6arzsf7qiilpyef6zlabvrfedbfhoafuxz3mw.py
# Topologically Sorted Source Nodes: [mean, y_2, mul, y_3, add, y_4, mean_2], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mean_2 => mean_2
# mul => mul
# y_2 => sub
# y_3 => mean_1
# y_4 => sqrt
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [0]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sqrt, [3], True), kwargs = {})
triton_poi_fused_add_mean_mul_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_mean_mul_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_sqrt_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (64 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (128 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (192 + (4*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (65 + (4*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (129 + (4*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (193 + (4*x0)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr0 + (66 + (4*x0)), xmask, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr0 + (130 + (4*x0)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr0 + (194 + (4*x0)), xmask, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp71 = tl.load(in_ptr0 + (67 + (4*x0)), xmask, eviction_policy='evict_last')
tmp73 = tl.load(in_ptr0 + (131 + (4*x0)), xmask, eviction_policy='evict_last')
tmp75 = tl.load(in_ptr0 + (195 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-08
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp26 = tmp24 + tmp25
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp31 = tmp30 / tmp7
tmp32 = tmp24 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp25 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp29 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp7
tmp44 = tmp43 + tmp21
tmp45 = libdevice.sqrt(tmp44)
tmp46 = tmp23 + tmp45
tmp49 = tmp47 + tmp48
tmp51 = tmp49 + tmp50
tmp53 = tmp51 + tmp52
tmp54 = tmp53 / tmp7
tmp55 = tmp47 - tmp54
tmp56 = tmp55 * tmp55
tmp57 = tmp48 - tmp54
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tmp50 - tmp54
tmp61 = tmp60 * tmp60
tmp62 = tmp59 + tmp61
tmp63 = tmp52 - tmp54
tmp64 = tmp63 * tmp63
tmp65 = tmp62 + tmp64
tmp66 = tmp65 / tmp7
tmp67 = tmp66 + tmp21
tmp68 = libdevice.sqrt(tmp67)
tmp69 = tmp46 + tmp68
tmp72 = tmp70 + tmp71
tmp74 = tmp72 + tmp73
tmp76 = tmp74 + tmp75
tmp77 = tmp76 / tmp7
tmp78 = tmp70 - tmp77
tmp79 = tmp78 * tmp78
tmp80 = tmp71 - tmp77
tmp81 = tmp80 * tmp80
tmp82 = tmp79 + tmp81
tmp83 = tmp73 - tmp77
tmp84 = tmp83 * tmp83
tmp85 = tmp82 + tmp84
tmp86 = tmp75 - tmp77
tmp87 = tmp86 * tmp86
tmp88 = tmp85 + tmp87
tmp89 = tmp88 / tmp7
tmp90 = tmp89 + tmp21
tmp91 = libdevice.sqrt(tmp90)
tmp92 = tmp69 + tmp91
tmp93 = tmp92 / tmp7
tl.store(out_ptr0 + (x0), tmp93, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ec/cecyxpelmswvt6jol4othxdmnyya7plppttoa4vwbilp3hgixyvx.py
# Topologically Sorted Source Nodes: [mean_3, y_5], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean_3 => mean_3
# y_5 => mean_4
# Graph fragment:
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_2, [2], True), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_3, [1], True), kwargs = {})
triton_per_fused_mean_1 = async_compile.triton('triton_per_fused_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2g/c2gcliugfqrmhgxgfxkeop7kku2cer54jg4aqp257fhigudythgg.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %repeat], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nk/cnkvwdw4ef3xuihktlbv7cdw5pe3ugibirelzfpgpchyjshkw4l2.py
# Topologically Sorted Source Nodes: [mean_3, y_5, y_7], Original ATen: [aten.mean, aten.repeat]
# Source node to ATen node mapping:
# mean_3 => mean_3
# y_5 => mean_4
# y_7 => repeat
# Graph fragment:
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_2, [2], True), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_3, [1], True), kwargs = {})
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%mean_4, [4, 1, 4, 4]), kwargs = {})
triton_poi_fused_mean_repeat_3 = async_compile.triton('triton_poi_fused_mean_repeat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_repeat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = 4.0
tmp3 = tmp1 / tmp2
tl.store(out_ptr0 + (x0 + (80*x1)), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 1), (16, 4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [mean, y_2, mul, y_3, add, y_4, mean_2], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add, aten.sqrt]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_mul_sqrt_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_3, y_5], Original ATen: [aten.mean]
triton_per_fused_mean_1.run(buf0, buf1, 1, 4, grid=grid(1), stream=stream0)
del buf0
buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias
# Topologically Sorted Source Nodes: [mean_3, y_5, y_7], Original ATen: [aten.mean, aten.repeat]
triton_poi_fused_mean_repeat_3.run(buf1, buf3, 64, grid=grid(64), stream=stream0)
del buf1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MinibatchStdLayer(nn.Module):
def __init__(self, group_size=4):
super().__init__()
self.group_size = group_size
def forward(self, x):
group_size = min(self.group_size, x.shape[0])
s = x.shape
y = x.view([group_size, -1, s[1], s[2], s[3]])
y = y.float()
y = y - torch.mean(y, dim=0, keepdim=True)
y = torch.mean(y * y, dim=0)
y = torch.sqrt(y + 1e-08)
        y = torch.mean(torch.mean(torch.mean(y, axis=3, keepdim=True),
            axis=2, keepdim=True), axis=1, keepdim=True)
y = y.type(x.type())
y = y.repeat(group_size, 1, s[2], s[3])
return torch.cat([x, y], axis=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
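# Hedged shape sketch (editor's addition): the layer computes one shared
# std-based statistic over the (grouped) batch and appends it as an extra
# feature map, so channels grow from C to C + 1 while N, H, W are unchanged.
def _minibatch_std_demo():
    layer = MinibatchStdLayer(group_size=4)
    y = layer(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 5, 4, 4)  # 4 original channels + 1 statistic map
    return y.shape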
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_mul_sqrt_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (64 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (128 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (192 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (65 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (129 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (193 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr0 + (66 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr0 + (130 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp52 = tl.load(in_ptr0 + (194 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp70 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp71 = tl.load(in_ptr0 + (67 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp73 = tl.load(in_ptr0 + (131 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp75 = tl.load(in_ptr0 + (195 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-08
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp26 = tmp24 + tmp25
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp31 = tmp30 / tmp7
tmp32 = tmp24 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp25 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp29 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp7
tmp44 = tmp43 + tmp21
tmp45 = libdevice.sqrt(tmp44)
tmp46 = tmp23 + tmp45
tmp49 = tmp47 + tmp48
tmp51 = tmp49 + tmp50
tmp53 = tmp51 + tmp52
tmp54 = tmp53 / tmp7
tmp55 = tmp47 - tmp54
tmp56 = tmp55 * tmp55
tmp57 = tmp48 - tmp54
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tmp50 - tmp54
tmp61 = tmp60 * tmp60
tmp62 = tmp59 + tmp61
tmp63 = tmp52 - tmp54
tmp64 = tmp63 * tmp63
tmp65 = tmp62 + tmp64
tmp66 = tmp65 / tmp7
tmp67 = tmp66 + tmp21
tmp68 = libdevice.sqrt(tmp67)
tmp69 = tmp46 + tmp68
tmp72 = tmp70 + tmp71
tmp74 = tmp72 + tmp73
tmp76 = tmp74 + tmp75
tmp77 = tmp76 / tmp7
tmp78 = tmp70 - tmp77
tmp79 = tmp78 * tmp78
tmp80 = tmp71 - tmp77
tmp81 = tmp80 * tmp80
tmp82 = tmp79 + tmp81
tmp83 = tmp73 - tmp77
tmp84 = tmp83 * tmp83
tmp85 = tmp82 + tmp84
tmp86 = tmp75 - tmp77
tmp87 = tmp86 * tmp86
tmp88 = tmp85 + tmp87
tmp89 = tmp88 / tmp7
tmp90 = tmp89 + tmp21
tmp91 = libdevice.sqrt(tmp90)
tmp92 = tmp69 + tmp91
tmp93 = tmp92 / tmp7
tl.store(out_ptr0 + x0, tmp93, xmask)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_mean_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = 4.0
tmp3 = tmp1 / tmp2
tl.store(out_ptr0 + (x0 + 80 * x1), tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 1), (16, 4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_sqrt_sub_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_per_fused_mean_1[grid(1)](buf0, buf1, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0)
triton_poi_fused_cat_2[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64)
triton_poi_fused_mean_repeat_3[grid(64)](buf1, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
return buf4,
class MinibatchStdLayerNew(nn.Module):
def __init__(self, group_size=4):
super().__init__()
self.group_size = group_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
 | bg459/gan-ensembling-loader | MinibatchStdLayer | false | 14,960 | ["MIT"] | 86 | 5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8 | https://github.com/bg459/gan-ensembling-loader/tree/5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8 |
LinearModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gv/cgvpra4kwn5idcnpg33dwbcypnrjm2z2np7rzbwqwz3kurrqhisn.py
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tmp1 * tmp2
tmp6 = tmp3 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_3
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LinearModel(nn.Module):
"""Linear model applying on the logits alpha * x + beta.
By default, this model is initialized as an identity operation.
See https://arxiv.org/abs/1905.13260 for an example usage.
:param alpha: A learned scalar.
:param beta: A learned scalar.
"""
def __init__(self, alpha=1.0, beta=0.0):
super().__init__()
self.alpha = nn.Parameter(torch.tensor(alpha))
self.beta = nn.Parameter(torch.tensor(beta))
def forward(self, inputs):
return self.alpha * inputs + self.beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
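The identity-initialization claim in the docstring above is easy to verify; below is a minimal sketch, assuming the LinearModel class defined above is in scope:

import torch

x = torch.rand(4, 4, 4, 4)
model = LinearModel()  # defaults alpha=1.0, beta=0.0
assert torch.allclose(model(x), x)  # identity operation at initialization
scaled = LinearModel(alpha=2.0, beta=0.5)
assert torch.allclose(scaled(x), 2.0 * x + 0.5)  # scalar parameters broadcast over the input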
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tmp1 * tmp2
tmp6 = tmp3 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class LinearModelNew(nn.Module):
"""Linear model applying on the logits alpha * x + beta.
By default, this model is initialized as an identity operation.
See https://arxiv.org/abs/1905.13260 for an example usage.
:param alpha: A learned scalar.
:param beta: A learned scalar.
"""
def __init__(self, alpha=1.0, beta=0.0):
super().__init__()
self.alpha = nn.Parameter(torch.tensor(alpha))
self.beta = nn.Parameter(torch.tensor(beta))
def forward(self, input_0):
primals_1 = self.alpha
primals_3 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| billpsomas/incremental_learning.pytorch | LinearModel | false | 14,961 | [
"MIT"
]
| 277 | a401a6609fc61c74698739cf937c0ece1c10913f | https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f |
WeightedL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hs/chs2gvocblpzshf7yazcw4fsccqti734wg4gdyu42tkvodvztw2w.py
# Topologically Sorted Source Nodes: [isnan, target, diff, loss], Original ATen: [aten.isnan, aten.where, aten.sub, aten.abs]
# Source node to ATen node mapping:
# diff => sub
# isnan => isnan
# loss => abs_1
# target => where
# Graph fragment:
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%arg0_1,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %arg1_1, %arg0_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %where), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
triton_poi_fused_abs_isnan_sub_where_0 = async_compile.triton('triton_poi_fused_abs_isnan_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_isnan_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_isnan_sub_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = libdevice.isnan(tmp1).to(tl.int1)
tmp3 = tl.where(tmp2, tmp0, tmp1)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.abs(tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [isnan, target, diff, loss], Original ATen: [aten.isnan, aten.where, aten.sub, aten.abs]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_isnan_sub_where_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: 'list'=None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
self.code_weights = code_weights
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights)
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'=None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target)
diff = input - target
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1
] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
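A minimal sketch of the NaN-masking behaviour described in the forward docstring, assuming the WeightedL1Loss class above is in scope; NaN entries in the target are replaced by the prediction, so they contribute zero loss:

import torch

loss_fn = WeightedL1Loss()
pred = torch.randn(2, 3, 4)
target = pred.clone()
target[0, 0, 0] = float('nan')  # simulate a missing regression target
loss = loss_fn(pred, target)    # weights=None, so no anchor-wise weighting
assert loss.shape == (2, 3, 4)  # per-code loss, no reduction
assert torch.all(loss == 0)     # target == pred elsewhere; NaN is masked to pred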
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_isnan_sub_where_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = libdevice.isnan(tmp1).to(tl.int1)
tmp3 = tl.where(tmp2, tmp0, tmp1)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.abs(tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_isnan_sub_where_0[grid(256)](arg1_1, arg0_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class WeightedL1LossNew(nn.Module):
def __init__(self, code_weights: 'list'=None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1LossNew, self).__init__()
self.code_weights = code_weights
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| blakechen97/SASA | WeightedL1Loss | false | 14,962 | [
"Apache-2.0"
]
| 46 | cd79f60e923242590b64cb0cc70203a524e7e9a7 | https://github.com/blakechen97/SASA/tree/cd79f60e923242590b64cb0cc70203a524e7e9a7 |
TransposeLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cf/ccftx7haxgyxp2rxkc23usg2pboxl5ewmlwa67bacgt2i2keing2.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 4, 4, grid=grid(4, 4), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class TransposeLayer(torch.nn.Module):
"""Transpose the input."""
def forward(self, data):
return data.t().contiguous()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
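A quick equivalence check for the layer and the tiled-copy kernel above, assuming the TransposeLayer class above is in scope; .contiguous() materializes the transposed view into row-major storage, which is exactly what the Triton clone kernel performs on CUDA:

import torch

x = torch.rand(4, 4)
y = TransposeLayer()(x)
assert torch.equal(y, x.t())  # same values as the lazy transpose view
assert y.is_contiguous()      # but backed by freshly laid-out memory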
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4, 4)](arg0_1, buf0, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class TransposeLayerNew(torch.nn.Module):
"""Transpose the input."""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| bolajiy/beer | TransposeLayer | false | 14,963 | [
"MIT"
]
| 46 | 6fe968c7ca4864437890aa6bd705755c2580696e | https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e |
WeightedBinaryCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ey/ceyqqenexhsxyru7ukq7zfr4xzb6mxbrdt64aal3eguh4r327aeh.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# mean => mean
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_2, [-1]), kwargs = {})
triton_poi_fused_binary_cross_entropy_with_logits_mean_0 = async_compile.triton('triton_poi_fused_binary_cross_entropy_with_logits_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_binary_cross_entropy_with_logits_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp14 = tmp1 - tmp13
tmp16 = tmp14 * tmp15
tmp17 = triton_helpers.minimum(tmp5, tmp15)
tmp18 = tl_math.abs(tmp15)
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = libdevice.log1p(tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp16 - tmp22
tmp24 = tmp12 + tmp23
tmp26 = tmp1 - tmp25
tmp28 = tmp26 * tmp27
tmp29 = triton_helpers.minimum(tmp5, tmp27)
tmp30 = tl_math.abs(tmp27)
tmp31 = -tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = libdevice.log1p(tmp32)
tmp34 = tmp29 - tmp33
tmp35 = tmp28 - tmp34
tmp36 = tmp24 + tmp35
tmp38 = tmp1 - tmp37
tmp40 = tmp38 * tmp39
tmp41 = triton_helpers.minimum(tmp5, tmp39)
tmp42 = tl_math.abs(tmp39)
tmp43 = -tmp42
tmp44 = tl_math.exp(tmp43)
tmp45 = libdevice.log1p(tmp44)
tmp46 = tmp41 - tmp45
tmp47 = tmp40 - tmp46
tmp48 = tmp36 + tmp47
tmp49 = 4.0
tmp50 = tmp48 / tmp49
tl.store(out_ptr0 + (x0), tmp50, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/l3/cl3brlv6v2qpk3opkohktvujrz6abpkv6mtdghb7jpyvg4eoxp2f.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# loss => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, %arg2_1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_mean_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf0, arg2_1, buf1, 256, grid=grid(256), stream=stream0)
del arg2_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class WeightedBinaryCrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedBinaryCrossEntropyLoss, self).__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predited logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
                Weighted binary cross entropy loss without reduction.
"""
loss = F.binary_cross_entropy_with_logits(input, target, reduction=
'none').mean(dim=-1) * weights
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
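The fused kernel above evaluates binary cross entropy with logits in the numerically stable elementwise form (1 - z) * x - (min(0, x) - log1p(exp(-|x|))), which equals max(x, 0) - x * z + log1p(exp(-|x|)). A minimal PyTorch sketch checks that identity against the reference op:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)  # logits
z = torch.rand(4, 4, 4, 4)   # targets in [0, 1]
ref = F.binary_cross_entropy_with_logits(x, z, reduction='none')
stable = (1 - z) * x - (torch.minimum(torch.zeros_like(x), x)
                        - torch.log1p(torch.exp(-x.abs())))
assert torch.allclose(ref, stable, atol=1e-06)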
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_mean_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp39 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp14 = tmp1 - tmp13
tmp16 = tmp14 * tmp15
tmp17 = triton_helpers.minimum(tmp5, tmp15)
tmp18 = tl_math.abs(tmp15)
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = libdevice.log1p(tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp16 - tmp22
tmp24 = tmp12 + tmp23
tmp26 = tmp1 - tmp25
tmp28 = tmp26 * tmp27
tmp29 = triton_helpers.minimum(tmp5, tmp27)
tmp30 = tl_math.abs(tmp27)
tmp31 = -tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = libdevice.log1p(tmp32)
tmp34 = tmp29 - tmp33
tmp35 = tmp28 - tmp34
tmp36 = tmp24 + tmp35
tmp38 = tmp1 - tmp37
tmp40 = tmp38 * tmp39
tmp41 = triton_helpers.minimum(tmp5, tmp39)
tmp42 = tl_math.abs(tmp39)
tmp43 = -tmp42
tmp44 = tl_math.exp(tmp43)
tmp45 = libdevice.log1p(tmp44)
tmp46 = tmp41 - tmp45
tmp47 = tmp40 - tmp46
tmp48 = tmp36 + tmp47
tmp49 = 4.0
tmp50 = tmp48 / tmp49
tl.store(out_ptr0 + x0, tmp50, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_mean_0[grid(64)](
arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_1[grid(256)](buf0, arg2_1, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg2_1
del buf0
return buf1,
class WeightedBinaryCrossEntropyLossNew(nn.Module):
"""
    Transform the input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedBinaryCrossEntropyLossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| blakechen97/SASA | WeightedBinaryCrossEntropyLoss | false | 14,964 | [
"Apache-2.0"
]
| 46 | cd79f60e923242590b64cb0cc70203a524e7e9a7 | https://github.com/blakechen97/SASA/tree/cd79f60e923242590b64cb0cc70203a524e7e9a7 |
ResidualConvUnit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gu/cguvq7dzmhomcchbxvk4mcgrc7aszgsh5ytmkbdn727ju3aina23.py
# Topologically Sorted Source Nodes: [conv2d, r], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# conv2d => convolution
# r => add
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_3), kwargs = {})
triton_poi_fused_add_convolution_0 = async_compile.triton('triton_poi_fused_add_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, r], Original ATen: [aten.convolution, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_0.run(buf1, primals_2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.fft
import torch.utils.cpp_extension
import torch.nn
class ResidualConvUnit(nn.Module):
def __init__(self, cin, activation, bn):
super().__init__()
self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1,
bias=True)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x):
return self.skip_add.add(self.conv(x), x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cin': 4, 'activation': 4, 'bn': 4}]
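In float (non-quantized) mode, nn.quantized.FloatFunctional().add is plain tensor addition, so the unit reduces to conv(x) + x. A minimal sketch, assuming the ResidualConvUnit class above is in scope (note that the activation and bn arguments are accepted but unused by this variant):

import torch

unit = ResidualConvUnit(4, activation=None, bn=False)
x = torch.rand(2, 4, 8, 8)
out = unit(x)
assert out.shape == x.shape                   # 3x3 conv with padding=1 preserves H and W
assert torch.allclose(out, unit.conv(x) + x)  # FloatFunctional.add == eager add in float mode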
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.fft
import torch.utils.cpp_extension
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_2,
primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class ResidualConvUnitNew(nn.Module):
def __init__(self, cin, activation, bn):
super().__init__()
self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1,
bias=True)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| autonomousvision/stylegan_xl | ResidualConvUnit | false | 14,965 | [
"MIT"
]
| 214 | 8c76531bcbf0931c295ecd1d32f75af998d1411f | https://github.com/autonomousvision/stylegan_xl/tree/8c76531bcbf0931c295ecd1d32f75af998d1411f |
StandardizedConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yj/cyjqxrdr34zdlpnaqepj4py4tvwh2ebdslxkfeu7skxqjn4syiak.py
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# mean_1 => mean_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2], True), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp21 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
tmp27 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
tmp28 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
tmp30 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
tmp32 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + (x2), tmp36, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nn/cnnlxy27st37ghyasby2wfzvdhe7fuiziq2zan6tepfxf2oe5jld.py
# Topologically Sorted Source Nodes: [weight_mean, weight, std, weight_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.div]
# Source node to ATen node mapping:
# std => sqrt, var
# weight => sub
# weight_1 => div
# weight_mean => mean_2
# Graph fragment:
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_1, [3], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_2), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1.0})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %expand), kwargs = {})
triton_per_fused_div_mean_std_sub_1 = async_compile.triton('triton_per_fused_div_mean_std_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_std_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mean_std_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp17 / tmp19
tmp21 = tmp11 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp25 = tl.where(xmask, tmp23, 0)
tmp26 = tl.sum(tmp25, 1)[:, None]
tmp27 = 63.0
tmp28 = tmp26 / tmp27
tmp29 = libdevice.sqrt(tmp28)
tmp30 = 1e-05
tmp31 = tmp29 + tmp30
tmp32 = tmp10 / tmp31
tl.store(out_ptr0 + (r1 + (64*x0)), tmp10, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp29, xmask)
tl.store(out_ptr1 + (r1 + (64*x0)), tmp32, xmask)
''', device_str='cuda')
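# In the reduction kernel above, the divisor 63.0 is rnumel - 1 = 64 - 1, i.e.
# Bessel's correction, matching the default unbiased correction=1 of Tensor.std
# used by the source module. A quick plain-PyTorch check of that convention
# (hypothetical values):
#
#     import torch
#     v = torch.randn(64)
#     unbiased = ((v - v.mean()) ** 2).sum() / 63.0  # same divisor as the kernel
#     assert torch.allclose(unbiased.sqrt(), v.std())  # std defaults to correction=1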
# kernel path: runs/run_shard_0/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf5 = buf3; del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight_mean, weight, std, weight_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.div]
triton_per_fused_div_mean_std_sub_1.run(buf5, primals_1, buf0, buf1, buf6, 4, 64, grid=grid(4), stream=stream0)
del buf0
del buf1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf8, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf8, primals_1, primals_3, buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class StandardizedConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(StandardizedConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias)
def forward(self, x):
weight = self.weight
        weight_mean = weight.mean(dim=1, keepdim=True).mean(
            dim=2, keepdim=True).mean(dim=3, keepdim=True)
        weight = weight - weight_mean
        std = weight.view(weight.size(0), -1).std(dim=1).view(
            -1, 1, 1, 1) + 1e-05
weight = weight / std.expand_as(weight)
return F.conv2d(x, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
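# The following is a minimal, hypothetical sanity check (not part of the
# original module): it standardizes a weight tensor by hand and compares the
# result against the module's forward pass. The helper name and tolerance are
# illustrative assumptions.
def _check_standardized_conv():
    torch.manual_seed(0)
    conv = StandardizedConv2d(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4, 4)
    # Reproduce the standardization: zero-mean over (in, kh, kw), then divide
    # by the per-filter unbiased std plus the 1e-05 epsilon.
    w = conv.weight - conv.weight.mean(dim=(1, 2, 3), keepdim=True)
    w = w / (w.view(w.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-05)
    expected = F.conv2d(x, w, conv.bias)
    assert torch.allclose(conv(x), expected, atol=1e-06)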
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp30 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x2, tmp36, xmask)
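# Note on the kernel above (an added explanatory comment): for each
# (output_channel, kernel_column) pair it averages the (4, 4, 4, 4) weight
# over the input-channel and kernel-row axes (dims 1 and 2), writing a
# (4, 1, 1, 4) partial-mean buffer; the remaining dim-3 average is folded
# into the next kernel.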
@triton.jit
def triton_per_fused_div_mean_std_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tl.where(xmask, tmp11, 0)
tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp17 / tmp19
tmp21 = tmp11 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp25 = tl.where(xmask, tmp23, 0)
tmp26 = tl.sum(tmp25, 1)[:, None]
tmp27 = 63.0
tmp28 = tmp26 / tmp27
tmp29 = libdevice.sqrt(tmp28)
tmp30 = 1e-05
tmp31 = tmp29 + tmp30
tmp32 = tmp10 / tmp31
tl.store(out_ptr0 + (r1 + 64 * x0), tmp10, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp32, xmask)
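# Note on the kernel above (an added explanatory comment): it completes the
# weight mean over dim 3, subtracts it (out_ptr0), computes each filter's
# standard deviation over all 64 entries with Bessel's correction (the
# divide by 63.0, matching torch.Tensor.std's default unbiased estimate),
# stores the raw std to in_out_ptr0, and writes the standardized weight
# (delta / (std + 1e-05)) to out_ptr1.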
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf5 = buf3
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_div_mean_std_sub_1[grid(4)](buf5, primals_1, buf0,
buf1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(16)](buf8, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf8, primals_1, primals_3, buf5, buf6
class StandardizedConv2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(StandardizedConv2dNew, self).__init__(in_channels,
out_channels, kernel_size, stride, padding, dilation, groups, bias)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| blazejdolicki/vissl | StandardizedConv2d | false | 14,966 | ["MIT"] | 2,512 | 9c10748a19fb1c637f32687142c8cd685f2410ff | https://github.com/blazejdolicki/vissl/tree/9c10748a19fb1c637f32687142c8cd685f2410ff |
AdaptiveInstanceNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/52/c526p7iwll7vx7gobeuv6q3lym4ek7lbhopuykpcibc57bou263i.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jo/cjo3wxmtawsvu7opemz2xwvsknw4nxv74xivifhgb7csue6qqjbi.py
# Topologically Sorted Source Nodes: [out, mul_1, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul_1 => mul_2
# out => add, rsqrt, var_mean
# out_1 => add_1
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %view_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %getitem_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, mul_1, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
triton_per_fused__native_batch_norm_legit_add_mul_1.run(buf5, primals_4, buf1, primals_2, buf2, buf6, 16, 16, grid=grid(16), stream=stream0)
del buf1
del primals_2
return (buf6, buf0, primals_3, primals_4, buf2, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input, style):
style = self.style(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'style_dim': 4}]
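# A minimal, hypothetical usage sketch (not part of the original module):
# the style vector is projected to per-channel (gamma, beta) pairs that
# scale and shift the instance-normalized content features.
def _adain_demo():
    torch.manual_seed(0)
    adain = AdaptiveInstanceNorm(in_channel=4, style_dim=4)
    content, style = torch.rand(4, 4, 4, 4), torch.rand(4, 4)
    out = adain(content, style)
    assert out.shape == content.shape  # (N, C, H, W) is preserved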
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
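# Note on the kernel above (an added explanatory comment): each program
# normalizes one (sample, channel) slice over its 16 spatial positions
# (biased variance, eps=1e-05) and fuses the style modulation
# gamma * normalized + beta, where gamma and beta are the first and second
# halves of the style projection (in_ptr1) plus its bias (in_ptr2).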
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4
), 0), out=buf1)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5,
primals_4, buf1, primals_2, buf2, buf6, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf1
del primals_2
return buf6, buf0, primals_3, primals_4, buf2, buf5
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class AdaptiveInstanceNormNew(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input_0, input_1):
primals_2 = self.style.linear.bias
primals_1 = self.style.linear.weight_orig
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| blandocs/Tag2Pix | AdaptiveInstanceNorm | false | 14,967 | ["MIT"] | 232 | 733d729067608dbe2c1122c9128f2f38bc0a8edd | https://github.com/blandocs/Tag2Pix/tree/733d729067608dbe2c1122c9128f2f38bc0a8edd |
LabelSmoothingBCE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fv/cfvrnoxnx3hmrhzs46ipufr5zookbi4tvziof7hfy5yxhfmb3eo5.py
# Topologically Sorted Source Nodes: [eq_1, smooth_target_1, eq, smooth_target, binary_cross_entropy_with_logits], Original ATen: [aten.eq, aten.masked_fill, aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default_2, log1p, minimum, mul, neg, sub, sub_1, sub_2
# eq => eq
# eq_1 => eq_1
# smooth_target => full_default, where
# smooth_target_1 => full_default_1, where_1
# Graph fragment:
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %arg0_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %where), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %where_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_2, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0 = async_compile.triton('triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = tmp0 == tmp3
tmp5 = tl.where(tmp4, tmp3, tmp0)
tmp6 = tl.where(tmp2, tmp1, tmp5)
tmp7 = tmp3 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.minimum(tmp1, tmp8)
tmp11 = tl_math.abs(tmp8)
tmp12 = -tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp10 - tmp14
tmp16 = tmp9 - tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [eq_1, smooth_target_1, eq, smooth_target, binary_cross_entropy_with_logits], Original ATen: [aten.eq, aten.masked_fill, aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class LabelSmoothingBCE(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingBCE, self).__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
        smooth_target = target.clone().masked_fill(target == 1,
            self.confidence)
smooth_target = smooth_target.masked_fill(target == 0, self.smoothing)
return self.criterion(x, smooth_target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
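# A minimal, hypothetical sketch (not part of the original module): with
# smoothing=0.1 the hard targets 1 and 0 become 0.9 and 0.1 before the
# element-wise BCE-with-logits loss is evaluated.
def _label_smoothing_demo():
    crit = LabelSmoothingBCE(smoothing=0.1)
    logits = torch.zeros(2)
    target = torch.tensor([1.0, 0.0])
    loss = crit(logits, target)
    assert loss.shape == target.shape  # reduction='none' keeps the shape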
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = tmp0 == tmp3
tmp5 = tl.where(tmp4, tmp3, tmp0)
tmp6 = tl.where(tmp2, tmp1, tmp5)
tmp7 = tmp3 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.minimum(tmp1, tmp8)
tmp11 = tl_math.abs(tmp8)
tmp12 = -tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp10 - tmp14
tmp16 = tmp9 - tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
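# Note on the kernel above (an added explanatory comment): after smoothing
# the target z it evaluates the numerically stable BCE-with-logits identity
#   loss = (1 - z) * x - (min(0, x) - log1p(exp(-|x|)))
# which is equivalent to max(x, 0) - x * z + log(1 + exp(-|x|)) and avoids
# overflow in exp for large-magnitude logits x.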
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0[
            grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
del arg0_1
del arg1_1
return buf0,
class LabelSmoothingBCENew(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingBCENew, self).__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| boldsort/craftassist | LabelSmoothingBCE | false | 14,968 | ["MIT"] | 626 | 8058d115a250e30deb60d969b7b1a5fefd6e974c | https://github.com/boldsort/craftassist/tree/8058d115a250e30deb60d969b7b1a5fefd6e974c |
NormalIsotropicCovarianceLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/p2/cp2oq4jw67hdt3ng6tv4d2tdem4pti4ddgqlamekhvkep4zjsren.py
# Topologically Sorted Source Nodes: [padding, softplus, variances], Original ATen: [aten.ones_like, aten.softplus, aten.mul]
# Source node to ATen node mapping:
# padding => full_default
# softplus => div, exp, gt, log1p, mul, where
# variances => mul_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_3, %div), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %full_default), kwargs = {})
triton_poi_fused_mul_ones_like_softplus_0 = async_compile.triton('triton_poi_fused_mul_ones_like_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_ones_like_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_ones_like_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tmp9 = tmp8 * tmp1
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [means], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [padding, softplus, variances], Original ATen: [aten.ones_like, aten.softplus, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_ones_like_softplus_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import abc
import math
import torch
class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta):
"""Probabilistic layer to be used by the encoder/decoder of a
Variational AutoEncoder.
"""
@abc.abstractmethod
def forward(self, inputs):
"""Compute the parameters of the distribution conditioned on the
input.
Args:
inputs (``torch.Tensor[N,dim]``): Conditional inputs.
Returns:
params (object): Parameters of the distribution.
"""
pass
@abc.abstractmethod
def samples_and_llh(self, params, use_mean=False):
"""Samples using the reparameterization trick so that the each
sample can be backpropagated trough. In addition, returns the
log-likelihood of the samples given the sampling distribution.
Args:
params (object): Parameters of the sampling distribution.
            use_mean (boolean): If true, bypass the sampling and
just return the mean value of the distribution.
Returns:
samples (``torch.Tensor``): Sampled values.
llh (``torch.Tensor``): Log-likelihood for each sample.
"""
pass
@abc.abstractmethod
def log_likelihood(self, data, params):
"""Log-likelihood of the data.
Args:
data (``torch.Tensor[N,dim]``): Data.
params (object): Parameters of the distribution.
"""
pass
class NormalDiagonalCovarianceLayer(ProbabilisticLayer):
"""Normal distribution with diagonal covariance matrix layer."""
def __init__(self, dim_in, dim_out, variance_nonlinearity=None):
super().__init__()
self.mean = torch.nn.Linear(dim_in, dim_out)
self.logvar = torch.nn.Linear(dim_in, dim_out)
if variance_nonlinearity is None:
variance_nonlinearity = torch.nn.Softplus()
self.variance_nonlinearity = variance_nonlinearity
def forward(self, inputs):
return self.mean(inputs), self.variance_nonlinearity(self.logvar(
inputs))
def samples_and_llh(self, params, use_mean=False):
means, variances = params
if use_mean:
samples = means
else:
dtype, device = means.dtype, means.device
noise = torch.randn(*means.shape, dtype=dtype, device=device)
std_dev = variances.sqrt()
samples = means + std_dev * noise
llhs = self.log_likelihood(samples, params)
return samples, llhs
def log_likelihood(self, data, params):
means, variances = params
dim = means.shape[-1]
delta = torch.sum((data - means).pow(2) / variances, dim=-1)
        return -0.5 * (variances.log().sum(dim=-1) + delta +
            dim * math.log(2 * math.pi))
class NormalIsotropicCovarianceLayer(NormalDiagonalCovarianceLayer):
"""Normal distribution with isotropic covariance matrix layer."""
def __init__(self, dim_in, dim_out, variance_nonlinearity=None):
super().__init__(dim_in, dim_out)
self.mean = torch.nn.Linear(dim_in, dim_out)
self.logvar = torch.nn.Linear(dim_in, 1)
if variance_nonlinearity is None:
variance_nonlinearity = torch.nn.Softplus()
self.variance_nonlinearity = variance_nonlinearity
def forward(self, inputs):
means = self.mean(inputs)
padding = torch.ones_like(means)
variances = self.variance_nonlinearity(self.logvar(inputs)) * padding
return means, variances
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
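# A minimal, hypothetical sketch (not part of the original module): the
# isotropic layer predicts a single variance per input position and
# broadcasts it across all output dimensions via the ones_like padding.
def _isotropic_demo():
    layer = NormalIsotropicCovarianceLayer(dim_in=4, dim_out=4)
    means, variances = layer(torch.rand(4, 4, 4, 4))
    assert means.shape == variances.shape
    # every output dimension shares the same variance value
    assert torch.allclose(variances, variances[..., :1].expand_as(variances))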
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import abc
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_ones_like_softplus_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tmp9 = tmp8 * tmp1
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_ones_like_softplus_0[grid(256)](buf2, buf3,
256, XBLOCK=256, num_warps=4, num_stages=1)
    return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2)
class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta):
"""Probabilistic layer to be used by the encoder/decoder of a
Variational AutoEncoder.
"""
@abc.abstractmethod
def forward(self, inputs):
"""Compute the parameters of the distribution conditioned on the
input.
Args:
inputs (``torch.Tensor[N,dim]``): Conditional inputs.
Returns:
params (object): Parameters of the distribution.
"""
pass
@abc.abstractmethod
def samples_and_llh(self, params, use_mean=False):
"""Samples using the reparameterization trick so that the each
sample can be backpropagated trough. In addition, returns the
log-likelihood of the samples given the sampling distribution.
Args:
params (object): Parameters of the sampling distribution.
            use_mean (boolean): If true, bypass the sampling and
just return the mean value of the distribution.
Returns:
samples (``torch.Tensor``): Sampled values.
llh (``torch.Tensor``): Log-likelihood for each sample.
"""
pass
@abc.abstractmethod
def log_likelihood(self, data, params):
"""Log-likelihood of the data.
Args:
data (``torch.Tensor[N,dim]``): Data.
params (object): Parameters of the distribution.
"""
pass
class NormalDiagonalCovarianceLayer(ProbabilisticLayer):
"""Normal distribution with diagonal covariance matrix layer."""
def __init__(self, dim_in, dim_out, variance_nonlinearity=None):
super().__init__()
self.mean = torch.nn.Linear(dim_in, dim_out)
self.logvar = torch.nn.Linear(dim_in, dim_out)
if variance_nonlinearity is None:
variance_nonlinearity = torch.nn.Softplus()
self.variance_nonlinearity = variance_nonlinearity
def forward(self, inputs):
return self.mean(inputs), self.variance_nonlinearity(self.logvar(
inputs))
def samples_and_llh(self, params, use_mean=False):
means, variances = params
if use_mean:
samples = means
else:
dtype, device = means.dtype, means.device
noise = torch.randn(*means.shape, dtype=dtype, device=device)
std_dev = variances.sqrt()
samples = means + std_dev * noise
llhs = self.log_likelihood(samples, params)
return samples, llhs
def log_likelihood(self, data, params):
means, variances = params
dim = means.shape[-1]
delta = torch.sum((data - means).pow(2) / variances, dim=-1)
        return -0.5 * (variances.log().sum(dim=-1) + delta +
            dim * math.log(2 * math.pi))
class NormalIsotropicCovarianceLayerNew(NormalDiagonalCovarianceLayer):
"""Normal distribution with isotropic covariance matrix layer."""
def __init__(self, dim_in, dim_out, variance_nonlinearity=None):
super().__init__(dim_in, dim_out)
self.mean = torch.nn.Linear(dim_in, dim_out)
self.logvar = torch.nn.Linear(dim_in, 1)
if variance_nonlinearity is None:
variance_nonlinearity = torch.nn.Softplus()
self.variance_nonlinearity = variance_nonlinearity
def forward(self, input_0):
primals_1 = self.mean.weight
primals_2 = self.mean.bias
primals_4 = self.logvar.weight
primals_5 = self.logvar.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| bolajiy/beer | NormalIsotropicCovarianceLayer | false | 14,969 | ["MIT"] | 46 | 6fe968c7ca4864437890aa6bd705755c2580696e | https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e |
NormalDiagonalCovarianceLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sa/csap5cthikzuwdmqu2ehow73ilvezq4ym46wfjbhn3cebigfe7xy.py
# Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus]
# Source node to ATen node mapping:
# softplus => div, exp, gt, log1p, mul, where
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_3, %div), kwargs = {})
triton_poi_fused_softplus_0 = async_compile.triton('triton_poi_fused_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import abc
import math
import torch
class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta):
"""Probabilistic layer to be used by the encoder/decoder of a
Variational AutoEncoder.
"""
@abc.abstractmethod
def forward(self, inputs):
"""Compute the parameters of the distribution conditioned on the
input.
Args:
inputs (``torch.Tensor[N,dim]``): Conditional inputs.
Returns:
params (object): Parameters of the distribution.
"""
pass
@abc.abstractmethod
def samples_and_llh(self, params, use_mean=False):
"""Samples using the reparameterization trick so that the each
sample can be backpropagated trough. In addition, returns the
log-likelihood of the samples given the sampling distribution.
Args:
params (object): Parameters of the sampling distribution.
            use_mean (boolean): If true, bypass the sampling and
just return the mean value of the distribution.
Returns:
samples (``torch.Tensor``): Sampled values.
llh (``torch.Tensor``): Log-likelihood for each sample.
"""
pass
@abc.abstractmethod
def log_likelihood(self, data, params):
"""Log-likelihood of the data.
Args:
data (``torch.Tensor[N,dim]``): Data.
params (object): Parameters of the distribution.
"""
pass
class NormalDiagonalCovarianceLayer(ProbabilisticLayer):
"""Normal distribution with diagonal covariance matrix layer."""
def __init__(self, dim_in, dim_out, variance_nonlinearity=None):
super().__init__()
self.mean = torch.nn.Linear(dim_in, dim_out)
self.logvar = torch.nn.Linear(dim_in, dim_out)
if variance_nonlinearity is None:
variance_nonlinearity = torch.nn.Softplus()
self.variance_nonlinearity = variance_nonlinearity
def forward(self, inputs):
return self.mean(inputs), self.variance_nonlinearity(self.logvar(
inputs))
def samples_and_llh(self, params, use_mean=False):
means, variances = params
if use_mean:
samples = means
else:
dtype, device = means.dtype, means.device
noise = torch.randn(*means.shape, dtype=dtype, device=device)
std_dev = variances.sqrt()
samples = means + std_dev * noise
llhs = self.log_likelihood(samples, params)
return samples, llhs
def log_likelihood(self, data, params):
means, variances = params
dim = means.shape[-1]
delta = torch.sum((data - means).pow(2) / variances, dim=-1)
return -0.5 * (variances.log().sum(dim=-1) + delta + dim * math.log
(2 * math.pi))
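# Hedged aside (illustration only, not part of the original module): the
# closed-form density above is -0.5 * (sum(log var) + sum((x - mu)^2 / var)
# + D * log(2*pi)). It can be cross-checked against torch.distributions,
# where Independent(Normal(mu, sigma), 1) sums per-dimension log-probs over
# the last axis exactly like the manual sum:
def _check_diag_gaussian_llh(data, means, variances):
    dist = torch.distributions.Independent(
        torch.distributions.Normal(means, variances.sqrt()), 1)
    return dist.log_prob(data)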
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import abc
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta):
"""Probabilistic layer to be used by the encoder/decoder of a
Variational AutoEncoder.
"""
@abc.abstractmethod
def forward(self, inputs):
"""Compute the parameters of the distribution conditioned on the
input.
Args:
inputs (``torch.Tensor[N,dim]``): Conditional inputs.
Returns:
params (object): Parameters of the distribution.
"""
pass
@abc.abstractmethod
def samples_and_llh(self, params, use_mean=False):
"""Samples using the reparameterization trick so that the each
sample can be backpropagated trough. In addition, returns the
log-likelihood of the samples given the sampling distribution.
Args:
params (object): Parameters of the sampling distribution.
            use_mean (boolean): If true, bypass the sampling and
just return the mean value of the distribution.
Returns:
samples (``torch.Tensor``): Sampled values.
llh (``torch.Tensor``): Log-likelihood for each sample.
"""
pass
@abc.abstractmethod
def log_likelihood(self, data, params):
"""Log-likelihood of the data.
Args:
data (``torch.Tensor[N,dim]``): Data.
params (object): Parameters of the distribution.
"""
pass
class NormalDiagonalCovarianceLayerNew(ProbabilisticLayer):
"""Normal distribution with diagonal covariance matrix layer."""
def __init__(self, dim_in, dim_out, variance_nonlinearity=None):
super().__init__()
self.mean = torch.nn.Linear(dim_in, dim_out)
self.logvar = torch.nn.Linear(dim_in, dim_out)
if variance_nonlinearity is None:
variance_nonlinearity = torch.nn.Softplus()
self.variance_nonlinearity = variance_nonlinearity
def samples_and_llh(self, params, use_mean=False):
means, variances = params
if use_mean:
samples = means
else:
dtype, device = means.dtype, means.device
noise = torch.randn(*means.shape, dtype=dtype, device=device)
std_dev = variances.sqrt()
samples = means + std_dev * noise
llhs = self.log_likelihood(samples, params)
return samples, llhs
def log_likelihood(self, data, params):
means, variances = params
dim = means.shape[-1]
delta = torch.sum((data - means).pow(2) / variances, dim=-1)
return -0.5 * (variances.log().sum(dim=-1) + delta + dim * math.log
(2 * math.pi))
def forward(self, input_0):
primals_1 = self.mean.weight
primals_2 = self.mean.bias
primals_4 = self.logvar.weight
primals_5 = self.logvar.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| bolajiy/beer | NormalDiagonalCovarianceLayer | false | 14,970 | [
"MIT"
]
| 46 | 6fe968c7ca4864437890aa6bd705755c2580696e | https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e |
SkipLastTargetChannelWrapper | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5s/c5sm5kphldgxwwp4doi74u3hybpyvhluugizqauidwaufw37p6ud.py
# Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss]
# Source node to ATen node mapping:
# mse_loss => mean, pow_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %slice_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_0 = async_compile.triton('triton_per_fused_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
r0 = rindex % 48
r1 = (rindex // 48)
tmp0 = tl.load(in_ptr0 + (r2), rmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = 192.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
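# Hedged aside (illustration only): in_ptr1 is read at r0 + 64*r1 with
# r0 = rindex % 48, i.e. only the first 48 of every 64 contiguous target
# elements -- the first 3 of 4 channels per batch item. The whole reduction
# is therefore equivalent to the eager expression below, with the channel
# slice fused into the loss kernel instead of materialized:
def _mse_skip_last_channel_reference(inp, target):
    # inp: (4, 3, 4, 4); target: (4, 4, 4, 4) whose last channel is dropped.
    return torch.nn.functional.mse_loss(inp, target[:, :-1])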
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_0.run(buf1, arg1_1, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import MSELoss
class SkipLastTargetChannelWrapper(nn.Module):
"""
    Loss wrapper which removes the additional target channel
"""
def __init__(self, loss, squeeze_channel=False):
super(SkipLastTargetChannelWrapper, self).__init__()
self.loss = loss
self.squeeze_channel = squeeze_channel
def forward(self, input, target):
assert target.size(1
) > 1, 'Target tensor has a singleton channel dimension, cannot remove channel'
target = target[:, :-1, ...]
if self.squeeze_channel:
target = torch.squeeze(target, dim=1)
return self.loss(input, target)
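# Hedged usage sketch (illustration only, not from the original repo):
def _example_skip_last_channel():
    # The wrapper drops the trailing target channel -- e.g. an auxiliary
    # weight map stored alongside the labels -- before delegating to the
    # wrapped loss.
    criterion = SkipLastTargetChannelWrapper(MSELoss())
    prediction = torch.rand(4, 3, 4, 4)
    target = torch.rand(4, 4, 4, 4)  # 4th channel is discarded
    return criterion(prediction, target)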
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'loss': MSELoss()}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
r0 = rindex % 48
r1 = rindex // 48
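    # r2 walks the contiguous (4, 3, 4, 4) input flat; r0/r1 remap the same
    # flat index onto the (4, 4, 4, 4) target, keeping 48 of every 64
    # elements per batch item (i.e. target[:, :3]).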
tmp0 = tl.load(in_ptr0 + r2, rmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = 192.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SkipLastTargetChannelWrapperNew(nn.Module):
"""
    Loss wrapper which removes the additional target channel
"""
def __init__(self, loss, squeeze_channel=False):
super(SkipLastTargetChannelWrapperNew, self).__init__()
self.loss = loss
self.squeeze_channel = squeeze_channel
def forward(self, input_0, input_1):
arg1_1 = input_0
arg0_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bounesh/pytorch-3dunet | SkipLastTargetChannelWrapper | false | 14,971 | [
"MIT"
]
| 1,236 | 60278d01eaacc69feee731979826a0c26e223427 | https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427 |
WeightedSmoothL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xg/cxgg32ubkdfznwtvtyivfhfswp2zdg3yjej2sv2qkuy5ptz2xsmt.py
# Topologically Sorted Source Nodes: [l1, mask], Original ATen: [aten.smooth_l1_loss, aten.lt]
# Source node to ATen node mapping:
# l1 => abs_1, div, lt, mul, pow_1, sub, sub_1, where
# mask => lt_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {})
# %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%arg0_1, 4), kwargs = {})
triton_poi_fused_lt_smooth_l1_loss_0 = async_compile.triton('triton_poi_fused_lt_smooth_l1_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lt_smooth_l1_loss_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_lt_smooth_l1_loss_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp7
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp12 = 4.0
tmp13 = tmp1 < tmp12
tl.store(out_ptr0 + (x0), tmp11, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
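# Hedged aside (illustration only): tmp3..tmp11 above are the standard
# smooth-L1 (Huber, beta=1) branches -- 0.5 * d**2 for |d| < 1, |d| - 0.5
# otherwise -- and tmp13 is the `target < threshold` mask with the threshold
# 4.0 constant-folded in. Roughly the eager pair:
#
#     l1 = torch.nn.functional.smooth_l1_loss(input, target, reduction='none')
#     mask = target < 4.0
#
# Note the kernel returns the elementwise loss and the mask separately; the
# mask-weighting and final mean from the original forward() are not fused here.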
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [l1, mask], Original ATen: [aten.smooth_l1_loss, aten.lt]
stream0 = get_raw_stream(0)
triton_poi_fused_lt_smooth_l1_loss_0.run(arg1_1, arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class WeightedSmoothL1Loss(nn.SmoothL1Loss):
def __init__(self, threshold, initial_weight, apply_below_threshold=True):
super().__init__(reduction='none')
self.threshold = threshold
self.apply_below_threshold = apply_below_threshold
self.weight = initial_weight
def forward(self, input, target):
l1 = super().forward(input, target)
if self.apply_below_threshold:
mask = target < self.threshold
else:
mask = target >= self.threshold
l1[mask] = l1[mask] * self.weight
return l1.mean()
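# Hedged usage sketch (illustration only, not from the original repo):
def _example_weighted_smooth_l1():
    # Elements whose target falls below `threshold` get their smooth-L1
    # contribution scaled by `initial_weight` before the mean is taken
    # (or above it, when apply_below_threshold=False).
    criterion = WeightedSmoothL1Loss(threshold=0.5, initial_weight=4.0)
    prediction, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    return criterion(prediction, target)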
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'threshold': 4, 'initial_weight': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_lt_smooth_l1_loss_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp7
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp12 = 4.0
tmp13 = tmp1 < tmp12
tl.store(out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_lt_smooth_l1_loss_0[grid(256)](arg1_1, arg0_1,
buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0, buf1
class WeightedSmoothL1LossNew(nn.SmoothL1Loss):
def __init__(self, threshold, initial_weight, apply_below_threshold=True):
super().__init__(reduction='none')
self.threshold = threshold
self.apply_below_threshold = apply_below_threshold
self.weight = initial_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bounesh/pytorch-3dunet | WeightedSmoothL1Loss | false | 14,972 | [
"MIT"
]
| 1,236 | 60278d01eaacc69feee731979826a0c26e223427 | https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427 |
GELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zf/czf4b75gr6eqse3otrscdjeulescoi5ktlpvmsmjlmhwzv5w7coq.py
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_pow_sqrt_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
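# Hedged aside (illustration only): 0.7978845608028654 in the compiled kernel
# is sqrt(2/pi) constant-folded from the expression above. On torch >= 1.12
# the same curve is exposed as F.gelu(x, approximate='tanh'):
def _check_gelu_tanh_approx(x):
    return torch.allclose(GELU()(x), F.gelu(x, approximate='tanh'), atol=1e-6)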
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def __init__(self):
super(GELUNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| bubbliiiing/classification-pytorch | GELU | false | 14,973 | [
"MIT"
]
| 88 | ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 | https://github.com/bubbliiiing/classification-pytorch/tree/ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 |
HighwayNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ki/cki5tciw2mqxckl6wp54w7myqiup2tteoeveayxtieh64bftxlft.py
# Topologically Sorted Source Nodes: [sub, gate, nonlin, mul, sub_1, mul_1, res], Original ATen: [aten.sub, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# gate => sigmoid
# mul => mul
# mul_1 => mul_1
# nonlin => relu
# res => add
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 2), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sub,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %relu), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp10 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 2.0
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp3 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp3
tmp11 = tmp9 * tmp10
tmp12 = tmp7 + tmp11
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [lin], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, gate, nonlin, mul, sub_1, mul_1, res], Original ATen: [aten.sub, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0.run(buf0, buf2, buf1, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class HighwayNetwork(nn.Module):
def __init__(self, in_dim, out_dim):
super(HighwayNetwork, self).__init__()
self.gate_proj = nn.Linear(in_dim, out_dim)
self.lin_proj = nn.Linear(in_dim, out_dim)
self.nonlin_proj = nn.Linear(in_dim, out_dim)
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
else:
torch.nn.init.constant_(p, 0)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x) - 2)
lin = self.lin_proj(x)
nonlin = torch.relu(self.nonlin_proj(x))
res = gate * nonlin + (1 - gate) * lin
return res
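# Hedged aside (illustration only, not from the original repo): with biases
# zero-initialized, the constant -2 shift puts the gate near sigmoid(-2) ~= 0.12,
# so the block initially passes ~88% of the linear path -- the usual negative
# gate bias that keeps highway layers close to identity early in training.
def _example_highway_gate_at_init():
    net = HighwayNetwork(in_dim=4, out_dim=4)
    x = torch.rand(4, 4)
    gate = torch.sigmoid(net.gate_proj(x) - 2)
    return gate.mean()  # roughly 0.12 at initialization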
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp10 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp3 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp3
tmp11 = tmp9 * tmp10
tmp12 = tmp7 + tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0[grid(256)](buf0,
buf2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, buf1, buf2
class HighwayNetworkNew(nn.Module):
def __init__(self, in_dim, out_dim):
super(HighwayNetworkNew, self).__init__()
self.gate_proj = nn.Linear(in_dim, out_dim)
self.lin_proj = nn.Linear(in_dim, out_dim)
self.nonlin_proj = nn.Linear(in_dim, out_dim)
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
else:
torch.nn.init.constant_(p, 0)
def forward(self, input_0):
primals_1 = self.gate_proj.weight
primals_2 = self.gate_proj.bias
primals_4 = self.lin_proj.weight
primals_5 = self.lin_proj.bias
primals_6 = self.nonlin_proj.weight
primals_7 = self.nonlin_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| boldsort/craftassist | HighwayNetwork | false | 14,974 | [
"MIT"
]
| 626 | 8058d115a250e30deb60d969b7b1a5fefd6e974c | https://github.com/boldsort/craftassist/tree/8058d115a250e30deb60d969b7b1a5fefd6e974c |
ResidualFeedFowardBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# h1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yd/cydzgmjezy53rwudarymzax4pxqhp2xr7ouqyygnpgzduhhzoqbz.py
# Topologically Sorted Source Nodes: [h2, add], Original ATen: [aten.tanh, aten.add]
# Source node to ATen node mapping:
# add => add
# h2 => tanh_1
# Graph fragment:
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh_1, %primals_3), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tmp1 + tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h2, add], Original ATen: [aten.tanh, aten.add]
triton_poi_fused_add_tanh_1.run(buf2, primals_3, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf2, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class ResidualFeedFowardBlock(torch.nn.Module):
"""Block of two feed-forward layer with a reisdual connection:
f(W1^T x + b1) f(W2^T h1 + b2 ) h2 + x
x ------------------> h1 --------------------> h2 ----------> y
| ^
| Residual connection |
+----------------------------------------------+
"""
def __init__(self, dim_in, width, activation_fn=torch.nn.Tanh):
super().__init__()
self.layer1 = torch.nn.Linear(dim_in, width)
self.layer2 = torch.nn.Linear(width, dim_in)
self.activation_fn = activation_fn()
def forward(self, x):
h1 = self.activation_fn(self.layer1(x))
h2 = self.activation_fn(self.layer2(h1))
return h2 + x
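# Hedged usage sketch (illustration only, not from the original repo): the
# block computes y = x + f(x) with f a two-layer tanh MLP, so input and
# output shapes match and blocks can be stacked freely.
def _example_residual_block():
    block = ResidualFeedFowardBlock(dim_in=4, width=8)
    x = torch.rand(4, 4)
    return block(x).shape  # torch.Size([4, 4]), same as the input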
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'width': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tmp1 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_tanh_1[grid(256)](buf2, primals_3, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf2, primals_4
class ResidualFeedFowardBlockNew(torch.nn.Module):
"""Block of two feed-forward layer with a reisdual connection:
f(W1^T x + b1) f(W2^T h1 + b2 ) h2 + x
x ------------------> h1 --------------------> h2 ----------> y
| ^
| Residual connection |
+----------------------------------------------+
"""
def __init__(self, dim_in, width, activation_fn=torch.nn.Tanh):
super().__init__()
self.layer1 = torch.nn.Linear(dim_in, width)
self.layer2 = torch.nn.Linear(width, dim_in)
self.activation_fn = activation_fn()
def forward(self, input_0):
primals_1 = self.layer1.weight
primals_2 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bolajiy/beer | ResidualFeedFowardBlock | false | 14,976 | [
"MIT"
]
| 46 | 6fe968c7ca4864437890aa6bd705755c2580696e | https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e |
EpeLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xf/cxfpx362dwqeg2i3bzxq2f4axrx6jyqemm3cd53qm4z2kw3rcnwb.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1]), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp4 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp5 = tl.load(in_ptr1 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp10 = tl.load(in_ptr1 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp14 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp15 = tl.load(in_ptr1 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 0.0
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.where(xmask, tmp22, 0)
tmp25 = tl.sum(tmp24, 1)[:, None]
tmp26 = 16.0
tmp27 = tmp25 / tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp27, xmask)
''', device_str='cuda')
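# Note (added for readability; not generated by Inductor): the four load pairs at
# offsets 0/16/32/48 walk the C=4 channels of the (4, 4, 4, 4) inputs, so the kernel
# computes sqrt(sum_c (pred - label)^2) at each of the 16 spatial sites of a batch
# row and then averages them, matching loss.view(N, -1).mean(1) in the eager module.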
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, arg0_1, arg1_1, 4, 16, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class EpeLoss(nn.Module):
def __init__(self, eps=0):
super(EpeLoss, self).__init__()
self.eps = eps
def forward(self, pred, label):
loss = ((pred - label).pow(2).sum(1) + self.eps).sqrt()
return loss.view(loss.shape[0], -1).mean(1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
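The eager module computes the end-point error sqrt(sum_c (pred_c - label_c)^2 + eps) per spatial position and averages it per sample. A hedged parity sketch against the compiled call() in this record (an added example; assumes a CUDA device):

# Sketch only: checks the Triton path against the eager path for the shapes above.
pred, label = [t.cuda() for t in get_inputs()]
eager = EpeLoss()(pred, label)                    # shape (4,), one value per sample
compiled, = call([pred.clone(), label.clone()])   # call() empties its argument list
assert torch.allclose(eager, compiled, atol=1e-5)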
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp4 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp10 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp14 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp15 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 0.0
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.where(xmask, tmp22, 0)
tmp25 = tl.sum(tmp24, 1)[:, None]
tmp26 = 16.0
tmp27 = tmp25 / tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp27, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, arg0_1, arg1_1, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class EpeLossNew(nn.Module):
def __init__(self, eps=0):
super(EpeLossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| brightvioletlight/MaskFlownet-Pytorch | EpeLoss | false | 14,977 | [
"MIT"
]
| 75 | 4158bac3b2fe50bfdf4216b4890ce24a8011227a | https://github.com/brightvioletlight/MaskFlownet-Pytorch/tree/4158bac3b2fe50bfdf4216b4890ce24a8011227a |
ExtResNetBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nq/cnqfeo5iu3yeiof2x33r7xnbaj7fk2w7g5swc25ak43vd4xaupdu.py
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu]
# Source node to ATen node mapping:
# input_2 => add, add_1, mul_1, rsqrt, var_mean
# input_3 => expm1, gt, mul_2, mul_4, where
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_6), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_2, %mul_4), kwargs = {})
triton_per_fused_elu_native_group_norm_0 = async_compile.triton('triton_per_fused_elu_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_elu_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 1.0
tmp31 = tmp27 * tmp30
tmp32 = libdevice.expm1(tmp31)
tmp33 = tmp32 * tmp30
tmp34 = tl.where(tmp29, tmp31, tmp33)
tl.store(in_out_ptr0 + (r1 + (64*x0)), tmp34, xmask)
tl.store(out_ptr2 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
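# Note (added for readability; not generated by Inductor): for each (sample, group)
# row of 64 values, the kernel above computes the mean and variance, applies
# ELU((x - mean) * rsqrt(var + 1e-05) * weight + bias), and also writes the mean
# (out_ptr0) and the rsqrt (out_ptr2) that the backward pass will reuse.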
# kernel path: runs/run_shard_0/inductor_cache/u5/cu5cyt57jdnmvmp7ergqhphxhmiitnaocaheuhyvmg4limh7mwlo.py
# Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu]
# Source node to ATen node mapping:
# input_8 => add_4, add_5, mul_11, rsqrt_2, var_mean_2
# out => add_6
# out_1 => expm1_2, gt_2, mul_12, mul_14, where_2
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_22), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %unsqueeze_19), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %where), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {})
# %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 1.0), kwargs = {})
# %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_12, %mul_14), kwargs = {})
triton_per_fused_add_elu_native_group_norm_1 = async_compile.triton('triton_per_fused_add_elu_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_elu_native_group_norm_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp32 = 1.0
tmp33 = tmp29 * tmp32
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp32
tmp36 = tl.where(tmp31, tmp33, tmp35)
tl.store(in_out_ptr0 + (r1 + (64*x0)), tmp36, xmask)
tl.store(out_ptr2 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf4; del buf4 # reuse
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu]
stream0 = get_raw_stream(0)
triton_per_fused_elu_native_group_norm_0.run(buf6, buf0, primals_3, primals_4, buf1, buf5, 4, 64, grid=grid(4), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = buf11; del buf11 # reuse
buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.native_group_norm, aten.elu]
triton_per_fused_elu_native_group_norm_0.run(buf13, buf7, primals_6, primals_7, buf8, buf12, 4, 64, grid=grid(4), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = buf19; del buf19 # reuse
buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu]
triton_per_fused_add_elu_native_group_norm_1.run(buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 4, 64, grid=grid(4), stream=stream0)
del primals_10
return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf5, (4, 1), (1, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 1), (1, 1), 0), reinterpret_tensor(buf12, (4, 1), (1, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 1), (1, 1), 0), reinterpret_tensor(buf18, (4, 1), (1, 1), 0), buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding):
"""
    Create a list of modules which together constitute a single conv layer with a non-linearity
    and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size(int or tuple): size of the convolving kernel
        order (string): order of the operations, e.g.
'cr' -> conv + ReLU
'gcr' -> groupnorm + conv + ReLU
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
'bcr' -> batchnorm + conv + ReLU
num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding added to all three sides of the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
if is_before_conv:
num_channels = in_channels
else:
num_channels = out_channels
if num_channels < num_groups:
num_groups = 1
assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}'
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=num_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int or tuple): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding added to all three sides of the input
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'gcr', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class ExtResNetBlock(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'cge', num_groups=8, **kwargs):
super(ExtResNetBlock, self).__init__()
self.conv1 = SingleConv(in_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
self.conv2 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
n_order = order
for c in 'rel':
n_order = n_order.replace(c, '')
self.conv3 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=n_order, num_groups=num_groups)
if 'l' in order:
self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
elif 'e' in order:
self.non_linearity = nn.ELU(inplace=True)
else:
self.non_linearity = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
residual = out
out = self.conv2(out)
out = self.conv3(out)
out += residual
out = self.non_linearity(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
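A small illustration of the order string (an added sketch, not part of the repo): with this block's default order='cge', create_conv emits a bias-free Conv3d (bias is disabled whenever 'g' or 'b' appears in the order), then GroupNorm, then ELU; num_groups also falls back to 1 here because 4 channels < 8 groups.

# Hypothetical usage sketch of create_conv with the defaults used above.
mods = create_conv(4, 4, kernel_size=3, order='cge', num_groups=8, padding=1)
print([name for name, _ in mods])   # ['conv', 'groupnorm', 'ELU']
print(mods[0][1].bias is None)      # True: 'g' in order disables the conv bias
print(mods[1][1].num_groups)        # 1: 4 channels cannot be split into 8 groups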
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 1.0
tmp31 = tmp27 * tmp30
tmp32 = libdevice.expm1(tmp31)
tmp33 = tmp32 * tmp30
tmp34 = tl.where(tmp29, tmp31, tmp33)
tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp34, xmask)
tl.store(out_ptr2 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp32 = 1.0
tmp33 = tmp29 * tmp32
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp32
tmp36 = tl.where(tmp31, tmp33, tmp35)
tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp36, xmask)
tl.store(out_ptr2 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf4
del buf4
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_elu_native_group_norm_0[grid(4)](buf6, buf0,
primals_3, primals_4, buf1, buf5, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4,
4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = buf11
del buf11
buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_elu_native_group_norm_0[grid(4)](buf13, buf7,
primals_6, primals_7, buf8, buf12, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_7
buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = buf19
del buf19
buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_add_elu_native_group_norm_1[grid(4)](buf20, buf14,
primals_9, primals_10, buf6, buf15, buf18, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_10
return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8,
primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64,
16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0),
reinterpret_tensor(buf5, (4, 1), (1, 1), 0), buf6, buf7,
reinterpret_tensor(buf8, (4, 1), (1, 1), 0), reinterpret_tensor(
buf12, (4, 1), (1, 1), 0), buf13, buf14, reinterpret_tensor(buf15,
(4, 1), (1, 1), 0), reinterpret_tensor(buf18, (4, 1), (1, 1), 0), buf20
)
def conv3d(in_channels, out_channels, kernel_size, bias, padding):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding):
"""
    Create a list of modules which together constitute a single conv layer with a non-linearity
    and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size(int or tuple): size of the convolving kernel
        order (string): order of the operations, e.g.
'cr' -> conv + ReLU
'gcr' -> groupnorm + conv + ReLU
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
'bcr' -> batchnorm + conv + ReLU
num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding added to all three sides of the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
if is_before_conv:
num_channels = in_channels
else:
num_channels = out_channels
if num_channels < num_groups:
num_groups = 1
assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}'
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=num_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int or tuple): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding added to all three sides of the input
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'gcr', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class ExtResNetBlockNew(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'cge', num_groups=8, **kwargs):
super(ExtResNetBlockNew, self).__init__()
self.conv1 = SingleConv(in_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
self.conv2 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
n_order = order
for c in 'rel':
n_order = n_order.replace(c, '')
self.conv3 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=n_order, num_groups=num_groups)
if 'l' in order:
self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
elif 'e' in order:
self.non_linearity = nn.ELU(inplace=True)
else:
self.non_linearity = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.conv.weight
primals_3 = self.conv1.groupnorm.weight
primals_4 = self.conv1.groupnorm.bias
primals_5 = self.conv2.conv.weight
primals_6 = self.conv2.groupnorm.weight
primals_7 = self.conv2.groupnorm.bias
primals_8 = self.conv3.conv.weight
primals_9 = self.conv3.groupnorm.weight
primals_10 = self.conv3.groupnorm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
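A hedged parity sketch (an added example; assumes a CUDA device and a PyTorch recent enough for Conv3d to accept the unbatched 4-D input used here) comparing the compiled wrapper against the eager ExtResNetBlock defined earlier in this record:

# Sketch only: both modules share one state_dict, so their outputs should agree
# up to kernel rounding.
eager = ExtResNetBlock(4, 4).cuda()
wrapped = ExtResNetBlockNew(4, 4).cuda()
wrapped.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), wrapped(x), atol=1e-5)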
| bounesh/pytorch-3dunet | ExtResNetBlock | false | 14,979 | [
"MIT"
]
| 1,236 | 60278d01eaacc69feee731979826a0c26e223427 | https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427 |
BCEDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/z6/cz6ehk6udjuldkbvdykpkjp4ihcvsvw26c57rsotg2zyo22imkez.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qy/cqy7ade3b5zspapgq3r6yfj5cbncx6zlctm6c2qhdgdpsoczelyy.py
# Topologically Sorted Source Nodes: [mul_1, intersect, mul_2, sum_2, mul_3, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersect => sum_1
# mul_1 => mul_2
# mul_2 => mul_3
# mul_3 => mul_4
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1]), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-1]), kwargs = {})
triton_per_fused_mul_sum_1 = async_compile.triton('triton_per_fused_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((16*x0) + (64*(r1 // 16)) + (r1 % 16)), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + ((16*x0) + (64*(r1 // 16)) + (r1 % 16)), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp2 * tmp2
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
tl.store(out_ptr2 + (x0), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2o/c2oaqlhwjvaim4gkjsmmdcmipjbwogjnjl6jkil6a6df23xruvt4.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, denominator, clamp, truediv, per_channel_dice, mean, sub, mul_5, add_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add, aten.clamp, aten.div, aten.mean, aten.rsub]
# Source node to ATen node mapping:
# add_1 => add_1
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# clamp => clamp_min
# denominator => add
# mean => mean_1
# mul => mul_1
# mul_5 => mul_6
# per_channel_dice => mul_5
# sub => sub_3
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %clamp_min), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_5,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mean_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_6), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tl.load(in_ptr2 + (r0), None)
tmp12 = tl.load(in_out_ptr0 + (0))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1])
tmp3 = tmp1 + tmp2
tmp4 = 1e-06
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp0 / tmp5
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 4.0
tmp17 = tmp15 * tmp16
tmp18 = tmp11 / tmp16
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tmp21 = tmp20 * tmp16
tmp22 = tmp17 + tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, intersect, mul_2, sum_2, mul_3, sum_3], Original ATen: [aten.mul, aten.sum]
triton_per_fused_mul_sum_1.run(arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, denominator, clamp, truediv, per_channel_dice, mean, sub, mul_5, add_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add, aten.clamp, aten.div, aten.mean, aten.rsub]
triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2.run(buf5, buf1, buf2, buf3, 1, 4, grid=grid(1), stream=stream0)
del buf1
del buf2
del buf3
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
transposed = tensor.permute(axis_order)
return transposed.contiguous().view(C, -1)
def compute_per_channel_dice(input, target, epsilon=1e-06, weight=None):
"""
    Computes the Dice coefficient as defined in https://arxiv.org/abs/1606.04797 given a multi-channel input and target.
    Assumes the input is a normalized probability, e.g. the result of a Sigmoid or Softmax function.
Args:
input (torch.Tensor): NxCxSpatial input tensor
target (torch.Tensor): NxCxSpatial target tensor
epsilon (float): prevents division by zero
weight (torch.Tensor): Cx1 tensor of weight per channel/class
"""
    assert input.size() == target.size(), \
        "'input' and 'target' must have the same shape"
input = flatten(input)
target = flatten(target)
target = target.float()
intersect = (input * target).sum(-1)
if weight is not None:
intersect = weight * intersect
denominator = (input * input).sum(-1) + (target * target).sum(-1)
return 2 * (intersect / denominator.clamp(min=epsilon))
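# Illustrative sanity check, not part of the original pytorch-3dunet code:
# on a toy one-channel pair the function should reproduce the closed form
# 2 * sum(p * t) / (sum(p * p) + sum(t * t)). The helper name is made up.
def _dice_sanity_check():
    pred = torch.tensor([[[[0.8, 0.2], [0.6, 0.4]]]])  # (N=1, C=1, 2, 2)
    target = torch.tensor([[[[1.0, 0.0], [1.0, 0.0]]]])
    dice = compute_per_channel_dice(pred, target)  # shape (C,) = (1,)
    expected = 2 * (pred * target).sum() / ((pred * pred).sum() +
                                            (target * target).sum())
    assert torch.allclose(dice, expected)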
class _AbstractDiceLoss(nn.Module):
"""
Base class for different implementations of Dice loss.
"""
def __init__(self, weight=None, normalization='sigmoid'):
super(_AbstractDiceLoss, self).__init__()
self.register_buffer('weight', weight)
assert normalization in ['sigmoid', 'softmax', 'none']
if normalization == 'sigmoid':
self.normalization = nn.Sigmoid()
elif normalization == 'softmax':
self.normalization = nn.Softmax(dim=1)
else:
self.normalization = lambda x: x
def dice(self, input, target, weight):
raise NotImplementedError
def forward(self, input, target):
input = self.normalization(input)
per_channel_dice = self.dice(input, target, weight=self.weight)
return 1.0 - torch.mean(per_channel_dice)
class DiceLoss(_AbstractDiceLoss):
"""Computes Dice Loss according to https://arxiv.org/abs/1606.04797.
For multi-class segmentation `weight` parameter can be used to assign different weights per class.
The input to the loss function is assumed to be a logit and will be normalized by the Sigmoid function.
"""
def __init__(self, weight=None, normalization='sigmoid'):
super().__init__(weight, normalization)
def dice(self, input, target, weight):
return compute_per_channel_dice(input, target, weight=self.weight)
class BCEDiceLoss(nn.Module):
"""Linear combination of BCE and Dice losses"""
def __init__(self, alpha, beta):
super(BCEDiceLoss, self).__init__()
self.alpha = alpha
self.bce = nn.BCEWithLogitsLoss()
self.beta = beta
self.dice = DiceLoss()
def forward(self, input, target):
return self.alpha * self.bce(input, target) + self.beta * self.dice(
input, target)
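# Usage sketch (illustrative, not from the original repo): with
# alpha = beta = 1 the result is exactly BCEWithLogitsLoss(input, target)
# plus the Dice loss computed on sigmoid(input). The helper name is made up.
def _bce_dice_example():
    criterion = BCEDiceLoss(alpha=1.0, beta=1.0)
    logits = torch.randn(2, 3, 8, 8, requires_grad=True)  # raw scores
    target = torch.randint(0, 2, (2, 3, 8, 8)).float()    # binary mask
    loss = criterion(logits, target)  # scalar: alpha * BCE + beta * Dice
    loss.backward()                   # gradients flow through both terms
    return loss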
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'alpha': 4, 'beta': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
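    # Single-program persistent reduction over all 256 elements. Each term
    # is (1 - a) * b - (min(0, b) - log1p(exp(-|b|))) with a = in_ptr0 and
    # b = in_ptr1, which simplifies to max(b, 0) - a * b + log1p(exp(-|b|)):
    # the elementwise BCE-with-logits value. Only the sum is stored; the
    # final fused kernel divides by 256.0 to recover the mean.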
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
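    # One program per channel (xnumel = 4). The offset 16 * x0 + 64 * (r1 //
    # 16) + r1 % 16 walks the 64 positions of channel x0 in channel-first
    # order, mirroring the flatten() helper defined below. With
    # p = sigmoid(in_ptr0) and t = in_ptr1, the outputs are the per-channel
    # sums of p * t, p * p and t * t -- the ingredients of the per-channel
    # Dice coefficient.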
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp2 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp2 * tmp2
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
tl.store(out_ptr2 + x0, tmp17, xmask)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel,
    XBLOCK: tl.constexpr):
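    # Epilogue combining both loss terms into one scalar. The running BCE
    # sum in in_out_ptr0 is divided by 256.0 (the element count) to form the
    # mean, the per-channel Dice values 2 * intersect / clamp(denominator,
    # 1e-06) are averaged over the 4 channels, and alpha * bce +
    # beta * (1 - mean_dice) is written back in place (alpha = beta = 4 are
    # baked in as the literal 4.0).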
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.load(in_ptr2 + r0, None)
tmp12 = tl.load(in_out_ptr0 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1])
tmp3 = tmp1 + tmp2
tmp4 = 1e-06
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp0 / tmp5
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 4.0
tmp17 = tmp15 * tmp16
tmp18 = tmp11 / tmp16
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tmp21 = tmp20 * tmp16
tmp22 = tmp17 + tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2,
buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf5 = buf0
del buf0
triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2[
grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del buf1
del buf2
del buf3
return buf5,
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
transposed = tensor.permute(axis_order)
return transposed.contiguous().view(C, -1)
def compute_per_channel_dice(input, target, epsilon=1e-06, weight=None):
"""
    Computes the Dice coefficient as defined in https://arxiv.org/abs/1606.04797 given a multi-channel input and target.
    Assumes the input is a normalized probability, e.g. the result of a Sigmoid or Softmax function.
Args:
input (torch.Tensor): NxCxSpatial input tensor
target (torch.Tensor): NxCxSpatial target tensor
epsilon (float): prevents division by zero
weight (torch.Tensor): Cx1 tensor of weight per channel/class
"""
    assert input.size() == target.size(), \
        "'input' and 'target' must have the same shape"
input = flatten(input)
target = flatten(target)
target = target.float()
intersect = (input * target).sum(-1)
if weight is not None:
intersect = weight * intersect
denominator = (input * input).sum(-1) + (target * target).sum(-1)
return 2 * (intersect / denominator.clamp(min=epsilon))
class _AbstractDiceLoss(nn.Module):
"""
Base class for different implementations of Dice loss.
"""
def __init__(self, weight=None, normalization='sigmoid'):
super(_AbstractDiceLoss, self).__init__()
self.register_buffer('weight', weight)
assert normalization in ['sigmoid', 'softmax', 'none']
if normalization == 'sigmoid':
self.normalization = nn.Sigmoid()
elif normalization == 'softmax':
self.normalization = nn.Softmax(dim=1)
else:
self.normalization = lambda x: x
def dice(self, input, target, weight):
raise NotImplementedError
def forward(self, input, target):
input = self.normalization(input)
per_channel_dice = self.dice(input, target, weight=self.weight)
return 1.0 - torch.mean(per_channel_dice)
class DiceLoss(_AbstractDiceLoss):
"""Computes Dice Loss according to https://arxiv.org/abs/1606.04797.
For multi-class segmentation `weight` parameter can be used to assign different weights per class.
The input to the loss function is assumed to be a logit and will be normalized by the Sigmoid function.
"""
def __init__(self, weight=None, normalization='sigmoid'):
super().__init__(weight, normalization)
def dice(self, input, target, weight):
return compute_per_channel_dice(input, target, weight=self.weight)
class BCEDiceLossNew(nn.Module):
"""Linear combination of BCE and Dice losses"""
def __init__(self, alpha, beta):
super(BCEDiceLossNew, self).__init__()
self.alpha = alpha
self.bce = nn.BCEWithLogitsLoss()
self.beta = beta
self.dice = DiceLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bounesh/pytorch-3dunet | BCEDiceLoss | false | 14,980 | [
"MIT"
]
| 1,236 | 60278d01eaacc69feee731979826a0c26e223427 | https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427 |
EpeLossWithMask | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/kg/ckgy2iz3tdhvxpxokygsn4ccbgu4ue5gpvk6mh5fr4cdj2y34hes.py
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, add, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# loss => sqrt
# pow_1 => pow_1
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_add_pow_sqrt_sub_sum_0 = async_compile.triton('triton_poi_fused_add_pow_sqrt_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_sqrt_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_pow_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 1e-08
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f4/cf4wtdpht4nec7biwsj2kyzgyrjaxkzenr5kqx7dnr3o62b3lbjc.py
# Topologically Sorted Source Nodes: [sum_2, sum_3, loss_2], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# loss_2 => div
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %sum_3), kwargs = {})
triton_per_fused_div_sum_1 = async_compile.triton('triton_per_fused_div_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, add, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.sqrt]
stream0 = get_raw_stream(0)
triton_poi_fused_add_pow_sqrt_sub_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sum_2, sum_3, loss_2], Original ATen: [aten.sum, aten.div]
triton_per_fused_div_sum_1.run(buf3, buf0, arg2_1, 4, 64, grid=grid(4), stream=stream0)
del arg2_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class EpeLossWithMask(nn.Module):
def __init__(self, eps=1e-08, q=None):
super(EpeLossWithMask, self).__init__()
self.eps = eps
self.q = q
def forward(self, pred, label, mask):
if self.q is not None:
loss = ((pred - label).abs().sum(1) + self.eps) ** self.q
else:
loss = ((pred - label).pow(2).sum(1) + self.eps).sqrt()
loss = loss * mask.squeeze(1)
        loss = loss.view(loss.shape[0], -1).sum(1) / mask.view(
            mask.shape[0], -1).sum(1)
return loss
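# Usage sketch (illustrative, not from the original repo): with an all-ones
# mask the result reduces to the mean endpoint error per batch element.
# The helper name is made up.
def _epe_example():
    epe = EpeLossWithMask()
    pred = torch.randn(2, 2, 8, 8)   # (B, 2, H, W) flow field
    label = torch.randn(2, 2, 8, 8)
    mask = torch.ones(2, 1, 8, 8)    # keep every pixel
    loss = epe(pred, label, mask)    # shape (B,), one value per sample
    manual = ((pred - label).pow(2).sum(1) + 1e-08).sqrt().mean((1, 2))
    assert torch.allclose(loss, manual)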
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
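    # One lane per spatial position (xnumel = 64 = 4 batches x 16 pixels).
    # The loads at offsets 0, 16, 32 and 48 traverse the channel dimension
    # (stride 16 within each 64-element batch block), so each lane computes
    # sqrt(sum_c (a_c - b_c)^2 + 1e-08): the per-pixel endpoint error.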
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 1e-08
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_per_fused_div_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
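    # One program per batch row (xnumel = 4). The per-pixel error in in_ptr0
    # is indexed by r1 alone, so the 64-element EPE map is broadcast against
    # every row of the mask -- matching `loss * mask.squeeze(1)` below,
    # where a (4, 4, 4) loss broadcasts over a (4, 4, 4, 4) mask. Each row
    # stores sum(mask * epe) / sum(mask).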
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sqrt_sub_sum_0[grid(64)](arg0_1, arg1_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = buf1
del buf1
triton_per_fused_div_sum_1[grid(4)](buf3, buf0, arg2_1, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf3,
class EpeLossWithMaskNew(nn.Module):
def __init__(self, eps=1e-08, q=None):
super(EpeLossWithMaskNew, self).__init__()
self.eps = eps
self.q = q
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| brightvioletlight/MaskFlownet-Pytorch | EpeLossWithMask | false | 14,981 | [
"MIT"
]
| 75 | 4158bac3b2fe50bfdf4216b4890ce24a8011227a | https://github.com/brightvioletlight/MaskFlownet-Pytorch/tree/4158bac3b2fe50bfdf4216b4890ce24a8011227a |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0), primals_3, out=buf4)
return (reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf3, primals_3, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.utils
class Attention(nn.Module):
def __init__(self, hidden_dim):
super(Attention, self).__init__()
self.hidden_dim = hidden_dim
self.ff = nn.Linear(in_features=hidden_dim, out_features=1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, contexts, context_masks=None):
"""
:param contexts: (batch_size, seq_len, n_hid)
:param context_masks: (batch_size, seq_len)
:return: (batch_size, n_hid), (batch_size, seq_len)
"""
out = self.ff(contexts)
out = out.view(contexts.size(0), contexts.size(1))
if context_masks is not None:
masked_out = out.masked_fill(context_masks, float('-inf'))
else:
masked_out = out
attn_weights = self.softmax(masked_out)
out = attn_weights.unsqueeze(1).bmm(contexts)
out = out.squeeze(1)
return out, attn_weights
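# Usage sketch (illustrative, not from the original repo): the pooled vector
# is a convex combination of the timestep vectors, and masked positions
# receive exactly zero attention weight. The helper name is made up.
def _attention_example():
    attn = Attention(hidden_dim=16)
    contexts = torch.randn(2, 5, 16)      # (batch, seq_len, n_hid)
    pooled, weights = attn(contexts)      # (2, 16), (2, 5)
    assert torch.allclose(weights.sum(-1), torch.ones(2))
    mask = torch.zeros(2, 5, dtype=torch.bool)  # True = ignore position
    mask[:, -1] = True
    _, masked_weights = attn(contexts, mask)
    assert torch.all(masked_weights[:, -1] == 0)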
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
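    # Softmax over rows of length 4, step 1: subtract the row maximum before
    # exponentiating, for numerical stability.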
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
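    # Softmax over rows of length 4, step 2: normalize by the row sum so the
    # attention weights of each row sum to one.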
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_2,
            reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
del buf2
        extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1),
            0), primals_3, out=buf4)
return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf3, primals_3, buf3
class AttentionNew(nn.Module):
def __init__(self, hidden_dim):
super(AttentionNew, self).__init__()
self.hidden_dim = hidden_dim
self.ff = nn.Linear(in_features=hidden_dim, out_features=1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_1 = self.ff.weight
primals_2 = self.ff.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| bstee615/ReVeal | Attention | false | 14,982 | [
"MIT"
]
| 63 | fc22d0d54a3a23d4e0bc45a249b7eea22749685e | https://github.com/bstee615/ReVeal/tree/fc22d0d54a3a23d4e0bc45a249b7eea22749685e |
TriangularSylvester | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/55/c556jlky4dn3zysohrmkzzgx6ib2mru2umit7utahhvakbnltdhb.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = (xindex // 256)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5b/c5bcfwai3m7fkujgbes6ir6ympljjp6fb2wxoes7gvitdxram4du.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/iw/ciwmzr5hq5tx2lqm3e3i4rmglvbvjaw6u6kl43c36rlbedge2b4m.py
# Topologically Sorted Source Nodes: [r2qzb, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# r2qzb => add
# tanh => tanh
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %arg3_1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_2 = async_compile.triton('triton_poi_fused_add_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vx/cvxsu2m52uyvpnycjr7aok3bayk4b4ym4xavnawourdmjqmrywqw.py
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# z_1 => add_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %unsqueeze), kwargs = {})
# %squeeze_2 : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dim](args = (%view_8, 1), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x0 = xindex % 64
x2 = (xindex // 256)
tmp0 = tl.load(in_out_ptr0 + (x5), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x5), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7f/c7fujxwq2f2xvmev3k42q4lrs7somuyvshrlgg6szpxlbg6im56g.py
# Topologically Sorted Source Nodes: [r2qzb, tanh_1, pow_1, sub, diag_j, diag_j_1, diag_j_2, abs_1, add_1, log_diag_j, log_det_j], Original ATen: [aten.add, aten.tanh, aten.pow, aten.rsub, aten.mul, aten.abs, aten.log, aten.sum]
# Source node to ATen node mapping:
# abs_1 => abs_1
# add_1 => add_3
# diag_j => mul
# diag_j_1 => mul_1
# diag_j_2 => add_2
# log_det_j => sum_1
# log_diag_j => log
# pow_1 => pow_1
# r2qzb => add
# sub => sub
# tanh_1 => tanh_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %arg3_1), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%tanh_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%diagonal, %diagonal_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %mul), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%log, [-1]), kwargs = {})
triton_poi_fused_abs_add_log_mul_pow_rsub_sum_tanh_4 = async_compile.triton('triton_poi_fused_abs_add_log_mul_pow_rsub_sum_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_log_mul_pow_rsub_sum_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_add_log_mul_pow_rsub_sum_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
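    # Closed-form log|det J| of the triangular Sylvester flow: each of the
    # four diagonal entries contributes log(|1 + h'(r2qzb) * diag(r1) *
    # diag(r2)| + 1e-08) with h'(x) = 1 - tanh(x)^2, and the contributions
    # are summed (the loads at offsets 0, 5, 10 and 15 within each
    # 16-element block pick out the 4x4 matrix diagonals).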
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (16*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (16*x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr3 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr1 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr2 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr3 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr1 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr2 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr3 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp9 = tmp7 * tmp8
tmp10 = tmp6 * tmp9
tmp11 = tmp10 + tmp5
tmp12 = tl_math.abs(tmp11)
tmp13 = 1e-08
tmp14 = tmp12 + tmp13
tmp15 = tl_math.log(tmp14)
tmp18 = tmp16 + tmp17
tmp19 = libdevice.tanh(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = tmp5 - tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 * tmp24
tmp26 = tmp25 + tmp5
tmp27 = tl_math.abs(tmp26)
tmp28 = tmp27 + tmp13
tmp29 = tl_math.log(tmp28)
tmp30 = tmp15 + tmp29
tmp33 = tmp31 + tmp32
tmp34 = libdevice.tanh(tmp33)
tmp35 = tmp34 * tmp34
tmp36 = tmp5 - tmp35
tmp39 = tmp37 * tmp38
tmp40 = tmp36 * tmp39
tmp41 = tmp40 + tmp5
tmp42 = tl_math.abs(tmp41)
tmp43 = tmp42 + tmp13
tmp44 = tl_math.log(tmp43)
tmp45 = tmp30 + tmp44
tmp48 = tmp46 + tmp47
tmp49 = libdevice.tanh(tmp48)
tmp50 = tmp49 * tmp49
tmp51 = tmp5 - tmp50
tmp54 = tmp52 * tmp53
tmp55 = tmp51 * tmp54
tmp56 = tmp55 + tmp5
tmp57 = tl_math.abs(tmp56)
tmp58 = tmp57 + tmp13
tmp59 = tl_math.log(tmp58)
tmp60 = tmp45 + tmp59
tl.store(out_ptr0 + (x3), tmp60, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(arg2_1, buf1, 1024, grid=grid(1024), stream=stream0)
buf2 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), out=buf2)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [r2qzb, tanh], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_2.run(buf2, arg3_1, buf3, 1024, grid=grid(1024), stream=stream0)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(arg1_1, buf4, 1024, grid=grid(1024), stream=stream0)
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (64, 4, 4), (16, 4, 1), 0), out=buf5)
del buf3
del buf4
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf5 # reuse
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf7, arg0_1, 1024, grid=grid(1024), stream=stream0)
del arg0_1
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r2qzb, tanh_1, pow_1, sub, diag_j, diag_j_1, diag_j_2, abs_1, add_1, log_diag_j, log_det_j], Original ATen: [aten.add, aten.tanh, aten.pow, aten.rsub, aten.mul, aten.abs, aten.log, aten.sum]
triton_poi_fused_abs_add_log_mul_pow_rsub_sum_tanh_4.run(buf2, arg3_1, arg1_1, arg2_1, buf8, 256, grid=grid(256), stream=stream0)
del arg1_1
del arg2_1
del arg3_1
del buf2
return (buf7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class TriangularSylvester(nn.Module):
"""
Sylvester normalizing flow with Q=P or Q=I.
"""
def __init__(self, z_size):
super(TriangularSylvester, self).__init__()
self.z_size = z_size
self.h = nn.Tanh()
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x) ** 2
def forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True):
"""
        All flow parameters are amortized. Conditions on the diagonals of R1
        and R2 need to be satisfied outside of this function.
Computes the following transformation:
z' = z + QR1 h( R2Q^T z + b)
or actually
z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
with Q = P a permutation matrix (equal to identity matrix if permute_z=None)
:param zk: shape: (batch_size, z_size)
:param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
:param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
        :param b: shape: (batch_size, 1, self.z_size)
        :param permute_z: optional index permutation of the z dimensions,
            applied before and after the transformation (Q = P); identity if None.
        :param sum_ldj: if True, sum the log-determinant terms over the last
            dimension; otherwise return them per dimension.
        :return: z, log_det_j
"""
zk = zk.unsqueeze(1)
diag_r1 = torch.diagonal(r1, 0, -1, -2)
diag_r2 = torch.diagonal(r2, 0, -1, -2)
if permute_z is not None:
z_per = zk[:, :, permute_z]
else:
z_per = zk
r2qzb = z_per @ r2.transpose(2, 1) + b
z = self.h(r2qzb) @ r1.transpose(2, 1)
if permute_z is not None:
z = z[:, :, permute_z]
z += zk
z = z.squeeze(1)
diag_j = diag_r1 * diag_r2
diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
diag_j += 1.0
log_diag_j = (diag_j.abs() + 1e-08).log()
if sum_ldj:
log_det_j = log_diag_j.sum(-1)
else:
log_det_j = log_diag_j
return z, log_det_j
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'z_size': 4}]
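# Hedged usage sketch (added for illustration; not part of the original
# source). It wires the flow up with the random inputs defined above; the
# helper name `_demo_triangular_sylvester` is ours.
def _demo_triangular_sylvester():
    init_args, init_kwargs = get_init_inputs()
    flow = TriangularSylvester(*init_args, **init_kwargs)
    zk, r1, r2, b = get_inputs()
    # permute_z=None keeps Q as the identity; sum_ldj=True sums the
    # per-dimension log-determinant terms over the last axis.
    z, log_det_j = flow(zk, r1, r2, b, permute_z=None, sum_ldj=True)
    return z, log_det_j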
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex // 256
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x0 = xindex % 64
x2 = xindex // 256
tmp0 = tl.load(in_out_ptr0 + x5, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x5, tmp2, xmask)
@triton.jit
def triton_poi_fused_abs_add_log_mul_pow_rsub_sum_tanh_4(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + 16 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + 16 * x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr3 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr2 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp38 = tl.load(in_ptr3 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp46 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp47 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp52 = tl.load(in_ptr2 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp53 = tl.load(in_ptr3 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp9 = tmp7 * tmp8
tmp10 = tmp6 * tmp9
tmp11 = tmp10 + tmp5
tmp12 = tl_math.abs(tmp11)
tmp13 = 1e-08
tmp14 = tmp12 + tmp13
tmp15 = tl_math.log(tmp14)
tmp18 = tmp16 + tmp17
tmp19 = libdevice.tanh(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = tmp5 - tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 * tmp24
tmp26 = tmp25 + tmp5
tmp27 = tl_math.abs(tmp26)
tmp28 = tmp27 + tmp13
tmp29 = tl_math.log(tmp28)
tmp30 = tmp15 + tmp29
tmp33 = tmp31 + tmp32
tmp34 = libdevice.tanh(tmp33)
tmp35 = tmp34 * tmp34
tmp36 = tmp5 - tmp35
tmp39 = tmp37 * tmp38
tmp40 = tmp36 * tmp39
tmp41 = tmp40 + tmp5
tmp42 = tl_math.abs(tmp41)
tmp43 = tmp42 + tmp13
tmp44 = tl_math.log(tmp43)
tmp45 = tmp30 + tmp44
tmp48 = tmp46 + tmp47
tmp49 = libdevice.tanh(tmp48)
tmp50 = tmp49 * tmp49
tmp51 = tmp5 - tmp50
tmp54 = tmp52 * tmp53
tmp55 = tmp51 * tmp54
tmp56 = tmp55 + tmp5
tmp57 = tl_math.abs(tmp56)
tmp58 = tmp57 + tmp13
tmp59 = tl_math.log(tmp58)
tmp60 = tmp45 + tmp59
tl.store(out_ptr0 + x3, tmp60, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(1024)](arg2_1, buf1, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), out=buf2)
buf3 = buf1
del buf1
triton_poi_fused_add_tanh_2[grid(1024)](buf2, arg3_1, buf3, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = buf0
del buf0
triton_poi_fused_clone_1[grid(1024)](arg1_1, buf4, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (64, 4, 4), (16, 4, 1), 0), out=buf5)
del buf3
del buf4
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf5
buf7 = buf6
del buf6
triton_poi_fused_add_3[grid(1024)](buf7, arg0_1, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_abs_add_log_mul_pow_rsub_sum_tanh_4[grid(256)](buf2,
arg3_1, arg1_1, arg2_1, buf8, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del arg1_1
del arg2_1
del arg3_1
del buf2
return buf7, buf8
class TriangularSylvesterNew(nn.Module):
"""
Sylvester normalizing flow with Q=P or Q=I.
"""
def __init__(self, z_size):
super(TriangularSylvesterNew, self).__init__()
self.z_size = z_size
self.h = nn.Tanh()
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x) ** 2
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1]
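# Hedged smoke test (added). The generated `call` pins CUDA device 0 and
# asserts exact (4, 4, 4, 4) shapes and strides, so this only runs on a
# CUDA build; the helper name is ours.
def _demo_triangular_sylvester_new():
    m = TriangularSylvesterNew(z_size=4)
    args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(4)]
    z, log_det_j = m(*args)
    return z, log_det_j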
| boldsort/NeuralDX7 | TriangularSylvester | false | 14,983 | ["MIT"] | 119 | 327844cea18a6dfe35e0dc8f5de0832343487366 | https://github.com/boldsort/NeuralDX7/tree/327844cea18a6dfe35e0dc8f5de0832343487366 |
MSE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6a/c6at7k6mlvvyb6bhxconu56vmlljdfsi5u47zlmh7d3irv3tnmhl.py
# Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
# diffs => add
# mse => div
# neg => neg
# pow_1 => pow_1
# sum_1 => sum_1
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 256), kwargs = {})
triton_per_fused_add_div_neg_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_neg_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
    tmp8 = 0.00390625  # = 1 / 256, the element count of the (4, 4, 4, 4) input
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class MSE(nn.Module):
def __init__(self):
super(MSE, self).__init__()
def forward(self, pred, real):
diffs = torch.add(real, -pred)
n = torch.numel(diffs.data)
mse = torch.sum(diffs.pow(2)) / n
return mse
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
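# Hedged equivalence check (added): for mean reduction,
# sum((real - pred) ** 2) / numel equals torch.nn.functional.mse_loss, so
# the sketch below should pass; the helper name is ours.
def _demo_mse():
    import torch.nn.functional as F
    pred, real = get_inputs()
    loss = MSE()(pred, real)
    assert torch.allclose(loss, F.mse_loss(pred, real, reduction='mean'))
    return loss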
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
    tmp8 = 0.00390625  # = 1 / 256, the element count of the (4, 4, 4, 4) input
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSENew(nn.Module):
def __init__(self):
super(MSENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
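# Note (added): `call` asserts the exact (4, 4, 4, 4) input shapes and the
# kernel bakes in the 1/256 normalizer, so MSENew is specialized to that
# shape, unlike the shape-generic eager MSE.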
| byamao1/MMSA | MSE | false | 14,984 | ["MIT"] | 198 | 1a894d042144c9ac75b3465d38871ce8c2987251 | https://github.com/byamao1/MMSA/tree/1a894d042144c9ac75b3465d38871ce8c2987251 |
LRN | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/t4/ct42qpaygn7av2p6rystjl4hk3ybzwp5jyvmk3jaiukfiri3pq65.py
# Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div]
# Source node to ATen node mapping:
# add => add
# div_2 => pow_2
# mul => mul
# x => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.0), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {})
triton_poi_fused_add_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True
):
super(LRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
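# Hedged usage sketch (added). With the default local_size=1 the average
# pool is an identity, so LRN reduces to the pointwise map
# x / (1 + alpha * x**2) ** beta; the helper name is ours.
def _demo_lrn():
    lrn = LRN()
    x, = get_inputs()
    y = lrn(x)
    assert torch.allclose(y, x / (1.0 + x.pow(2)).pow(0.75))
    return y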
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LRNNew(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True
):
super(LRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
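# Note (added): the fused kernel above hard-codes local_size=1, alpha=1.0,
# beta=0.75 and a 256-element input, so LRNNew's constructor arguments do
# not reach the compiled path.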
| bruinxiong/BNM | LRN | false | 14,985 | ["MIT"] | 252 | 71d4b8c9beca00e77fcbc62a12b69bb093736a82 | https://github.com/bruinxiong/BNM/tree/71d4b8c9beca00e77fcbc62a12b69bb093736a82 |
SIMSE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yv/cyvtugm5zmfyef6ozbsgz6j663e2vlhm4figzcm4sgsvgkz2jjx5.py
# Topologically Sorted Source Nodes: [neg, diffs, sum_1, pow_1, simse], Original ATen: [aten.neg, aten.add, aten.sum, aten.pow, aten.div]
# Source node to ATen node mapping:
# diffs => add
# neg => neg
# pow_1 => pow_1
# simse => div
# sum_1 => sum_1
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_1, 65536), kwargs = {})
triton_per_fused_add_div_neg_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_neg_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6 * tmp6
    tmp8 = 1.52587890625e-05  # = 1 / 65536 = 1 / 256**2, i.e. n**2 for n = 256 elements
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, diffs, sum_1, pow_1, simse], Original ATen: [aten.neg, aten.add, aten.sum, aten.pow, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class SIMSE(nn.Module):
def __init__(self):
super(SIMSE, self).__init__()
def forward(self, pred, real):
diffs = torch.add(real, -pred)
n = torch.numel(diffs.data)
simse = torch.sum(diffs).pow(2) / n ** 2
return simse
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
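# Hedged identity check (added): sum(diffs) ** 2 / n ** 2 is just the
# squared mean difference, so SIMSE should match (real - pred).mean() ** 2;
# the helper name is ours.
def _demo_simse():
    pred, real = get_inputs()
    loss = SIMSE()(pred, real)
    assert torch.allclose(loss, (real - pred).mean() ** 2)
    return loss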
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6 * tmp6
    tmp8 = 1.52587890625e-05  # = 1 / 65536 = 1 / 256**2, i.e. n**2 for n = 256 elements
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SIMSENew(nn.Module):
def __init__(self):
super(SIMSENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| byamao1/MMSA | SIMSE | false | 14,986 | ["MIT"] | 198 | 1a894d042144c9ac75b3465d38871ce8c2987251 | https://github.com/byamao1/MMSA/tree/1a894d042144c9ac75b3465d38871ce8c2987251 |
ActorCriticMLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/g5/cg5f2rptqnpi2mrqpqc4tujqpbrrrjrse6plhgftx425znsffpfv.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# a => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yh/cyhogxneodczl7mcnuf7mkhxldvr2nc5wj5e42agntthff4e45p7.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# a => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4), (4, 1))
assert_size_stride(primals_3, (128, ), (1, ))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf7, 8192, grid=grid(8192), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [a], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [c], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6)
del primals_7
return (buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
from typing import Tuple
from torch.nn import functional as F
class ActorCriticMLP(nn.Module):
"""MLP network with heads for actor and critic."""
def __init__(self, input_shape: 'Tuple[int]', n_actions: 'int',
hidden_size: 'int'=128):
"""
Args:
input_shape: observation shape of the environment
n_actions: number of discrete actions available in the environment
hidden_size: size of hidden layers
"""
super().__init__()
self.fc1 = nn.Linear(input_shape[0], hidden_size)
self.actor_head = nn.Linear(hidden_size, n_actions)
self.critic_head = nn.Linear(hidden_size, 1)
def forward(self, x) ->Tuple[Tensor, Tensor]:
"""Forward pass through network. Calculates the action logits and the value.
Args:
x: input to network
Returns:
            action log-probabilities (output of log_softmax), state value
"""
x = F.relu(self.fc1(x.float()))
a = F.log_softmax(self.actor_head(x), dim=-1)
c = self.critic_head(x)
return a, c
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_shape': [4, 4], 'n_actions': 4}]
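# Hedged usage sketch (added). It assumes a flat observation vector whose
# width matches input_shape[0] (the only entry fc1 reads); the helper name
# and the batch size are ours.
def _demo_actor_critic_mlp():
    net = ActorCriticMLP(input_shape=(4,), n_actions=4)
    obs = torch.rand(8, 4)
    log_probs, value = net(obs)
    # log_probs are normalized log-probabilities, which Categorical accepts
    # through its `logits` argument.
    action = torch.distributions.Categorical(logits=log_probs).sample()
    return action, value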
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from typing import Tuple
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4), (4, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_3, buf7, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf3
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf6)
del primals_7
return buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf4, primals_6, primals_4, buf7
class ActorCriticMLPNew(nn.Module):
"""MLP network with heads for actor and critic."""
def __init__(self, input_shape: 'Tuple[int]', n_actions: 'int',
hidden_size: 'int'=128):
"""
Args:
input_shape: observation shape of the environment
n_actions: number of discrete actions available in the environment
hidden_size: size of hidden layers
"""
super().__init__()
self.fc1 = nn.Linear(input_shape[0], hidden_size)
self.actor_head = nn.Linear(hidden_size, n_actions)
self.critic_head = nn.Linear(hidden_size, 1)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.actor_head.weight
primals_5 = self.actor_head.bias
primals_6 = self.critic_head.weight
primals_7 = self.critic_head.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| bzrry/lightning-bolts | ActorCriticMLP | false | 14,987 | ["Apache-2.0"] | 822 | bd392ad858039290c72c20cc3f10df39384e90b9 | https://github.com/bzrry/lightning-bolts/tree/bd392ad858039290c72c20cc3f10df39384e90b9 |
Mlp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zf/czf4b75gr6eqse3otrscdjeulescoi5ktlpvmsmjlmhwzv5w7coq.py
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, x_1], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# x_1 => mul_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
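# Annotation (not generated output): in eager PyTorch this fused graph is the
# tanh approximation of GELU, y = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
# with sqrt(2/pi) pre-folded into the constant 0.7978845608028654.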
triton_poi_fused_add_mul_pow_sqrt_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, x_1], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
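# Annotation (not part of the original module): a hedged numerical check that
# the hand-written GELU above matches PyTorch's built-in tanh approximation.
# The helper name `_check_gelu_approx` is ours.
def _check_gelu_approx():
    x = torch.randn(4, 4)
    ours = GELU()(x)
    ref = F.gelu(x, approximate='tanh')  # same formula, implemented natively
    return torch.allclose(ours, ref, atol=1e-6)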
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class MlpNew(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
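# Annotation (not part of the generated output): a hedged equivalence sketch.
# The helper name `_check_mlp_equivalence` is ours; it assumes a CUDA device
# and the (4, 4, 4, 4) input shape the compiled call() is specialized to.
# With drop=0.0 the dropout layers are identities, so the fused forward
# should match an eager fc1 -> GELU -> fc2 pass on the same parameters.
def _check_mlp_equivalence():
    m = MlpNew(in_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = m.fc2(GELU()(m.fc1(x)))
    fused = m(x)
    return torch.allclose(eager, fused, atol=1e-5)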
| bubbliiiing/classification-pytorch | Mlp | false | 14,988 | [
"MIT"
]
| 88 | ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 | https://github.com/bubbliiiing/classification-pytorch/tree/ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 |
PatchEmbed | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5s/c5sni7dzheaodogr5chdb3cizynndekqs4ajsctpfcvi3r5v37oa.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2304
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (768*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
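# Annotation (not generated output): this kernel and triton_poi_fused_1 below
# do no arithmetic; they repack the conv weight and the input from contiguous
# NCHW strides into a channels-last layout before the convolution call.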
# kernel path: runs/run_shard_0/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4c/c4ckui43udehobca2kb3vy5stpaqfztmtjwrdinx2dhmcmh73fmo.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [16, 16], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3072
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = (yindex // 768)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (768*x2) + (12288*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 16, 16), (768, 256, 16, 1))
assert_size_stride(primals_2, (768, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 2304, 256, grid=grid(2304, 256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, buf0, stride=(16, 16), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf2, primals_2, buf3, 3072, 16, grid=grid(3072, 16), stream=stream0)
del buf2
del primals_2
return (reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0), buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((768, 3, 16, 16), (768, 256, 16, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class PatchEmbed(nn.Module):
def __init__(self, input_shape=[224, 224], patch_size=16, in_chans=3,
num_features=768, norm_layer=None, flatten=True):
super().__init__()
self.num_patches = input_shape[0] // patch_size * (input_shape[1] //
patch_size)
self.flatten = flatten
self.proj = nn.Conv2d(in_chans, num_features, kernel_size=
patch_size, stride=patch_size)
self.norm = norm_layer(num_features) if norm_layer else nn.Identity()
def forward(self, x):
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
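# Annotation (not part of the original module): a worked shape example. The
# helper name `_demo_patch_embed` is ours; it runs entirely in eager mode.
# A 64x64 image with patch_size=16 yields a 4x4 grid, i.e. 16 patches of
# dimension 768.
def _demo_patch_embed():
    pe = PatchEmbed(input_shape=[64, 64], patch_size=16)
    x = torch.rand(4, 3, 64, 64)
    out = pe(x)
    return pe.num_patches, out.shape  # 16, torch.Size([4, 16, 768])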
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 2304
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 12288 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 16, 16), (768, 256, 16, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2304, 256)](primals_1, buf0, 2304, 256,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(16, 16),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_2[grid(3072, 16)](buf2, primals_2,
buf3, 3072, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del buf2
del primals_2
return reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0
), buf0, buf1
class PatchEmbedNew(nn.Module):
def __init__(self, input_shape=[224, 224], patch_size=16, in_chans=3,
num_features=768, norm_layer=None, flatten=True):
super().__init__()
self.num_patches = input_shape[0] // patch_size * (input_shape[1] //
patch_size)
self.flatten = flatten
self.proj = nn.Conv2d(in_chans, num_features, kernel_size=
patch_size, stride=patch_size)
self.norm = norm_layer(num_features) if norm_layer else nn.Identity()
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
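# Annotation (not part of the generated output): a minimal usage sketch. The
# helper name `_demo_patch_embed_new` is ours; it assumes a CUDA device and
# the (4, 3, 64, 64) input shape the compiled call() is specialized to.
def _demo_patch_embed_new():
    pe = PatchEmbedNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    return pe(x).shape  # torch.Size([4, 16, 768])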
| bubbliiiing/classification-pytorch | PatchEmbed | false | 14,989 | [
"MIT"
]
| 88 | ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 | https://github.com/bubbliiiing/classification-pytorch/tree/ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 |
QRNNLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xz/cxzsf26ggtk5idcw5c2x5i2d4b6gsk4jdbmp5hww2rt2rzdcprsk.py
# Topologically Sorted Source Nodes: [Z_1, F_1, mul], Original ATen: [aten.tanh, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# F_1 => sigmoid
# Z_1 => tanh
# mul => mul
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem,), kwargs = {})
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
triton_poi_fused_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = tmp7 * tmp3
tl.store(out_ptr0 + (x2), tmp3, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
tl.store(out_ptr2 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/a3/ca3bsjuefixqdmhw5ukfkbqau7sfqnwtokin5ojnvmcfqt46r2uk.py
# Topologically Sorted Source Nodes: [sub, mul_1, h_1, sub_1, mul_2, h_3], Original ATen: [aten.rsub, aten.mul, aten.add]
# Source node to ATen node mapping:
# h_1 => add
# h_3 => add_1
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %view_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, %mul_1), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_5), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_9, %mul_2), kwargs = {})
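# Annotation (not generated output): this fused graph is two unrolled steps of
# the ForgetMult recurrence h_t = f_t * z_t + (1 - f_t) * h_{t-1}, for
# timesteps t=1 and t=2 (t=0 has no previous hidden state when hidden=None).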
triton_poi_fused_add_mul_rsub_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr1 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp9 = tl.load(in_ptr1 + (32 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp5 = tmp2 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp1 - tmp7
tmp10 = tmp8 * tmp6
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp6, xmask)
tl.store(out_ptr2 + (x0), tmp8, xmask)
tl.store(out_ptr3 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kt/cktyghxfs5o5zgpoeaul6ggtwo7y4jndyee3b4cuq36rlohafg6x.py
# Topologically Sorted Source Nodes: [sub_2], Original ATen: [aten.rsub]
# Source node to ATen node mapping:
# sub_2 => sub_2
# Graph fragment:
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_6), kwargs = {})
triton_poi_fused_rsub_2 = async_compile.triton('triton_poi_fused_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_rsub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_rsub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ul/cull43lzdvgkohhnsx33ydgoct6h2wsep4ikyiwl7cdahwoarwya.py
# Topologically Sorted Source Nodes: [C, sigmoid_1, H], Original ATen: [aten.stack, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# C => cat
# H => mul_4
# sigmoid_1 => sigmoid_1
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_3, %view_4, %view_5, %view_6],), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %view_7), kwargs = {})
triton_poi_fused_mul_sigmoid_stack_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_stack_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp29 = tl.load(in_ptr4 + (8 + x0 + (12*x1)), xmask)
tmp30 = tl.load(in_ptr5 + (8 + x0), xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (48 + x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp21 = tl.load(in_ptr2 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp22 = tmp20 * tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp16, tmp23, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tmp28 = tl.where(tmp4, tmp5, tmp27)
tmp31 = tmp29 + tmp30
tmp32 = tl.sigmoid(tmp31)
tmp33 = tmp32 * tmp28
tl.store(out_ptr0 + (x2), tmp28, xmask)
tl.store(out_ptr1 + (x2), tmp32, xmask)
tl.store(out_ptr2 + (x2), tmp33, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [Z_1, F_1, mul], Original ATen: [aten.tanh, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0.run(buf0, primals_3, buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul_1, h_1, sub_1, mul_2, h_3], Original ATen: [aten.rsub, aten.mul, aten.add]
triton_poi_fused_add_mul_rsub_1.run(buf2, buf3, buf4, buf5, buf6, buf7, 16, grid=grid(16), stream=stream0)
buf8 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub_2], Original ATen: [aten.rsub]
triton_poi_fused_rsub_2.run(buf2, buf8, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [C, sigmoid_1, H], Original ATen: [aten.stack, aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_stack_3.run(buf3, buf5, buf7, buf8, buf0, primals_3, buf9, buf10, buf11, 64, grid=grid(64), stream=stream0)
del buf0
del primals_3
return (buf11, reinterpret_tensor(buf9, (1, 4, 4), (16, 4, 1), 48), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf1, buf2, reinterpret_tensor(buf3, (4, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf8, buf9, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.optim import *
class ForgetMult(torch.nn.Module):
"""ForgetMult computes a simple recurrent equation:
h_t = f_t * x_t + (1 - f_t) * h_{t-1}
This equation is equivalent to dynamic weighted averaging.
Inputs: X, hidden
- X (seq_len, batch, input_size): tensor containing the features of the input sequence.
- F (seq_len, batch, input_size): tensor containing the forget gate values, assumed in range [0, 1].
- hidden_init (batch, input_size): tensor containing the initial hidden state for the recurrence (h_{t-1}).
"""
def __init__(self):
super(ForgetMult, self).__init__()
def forward(self, f, x, hidden_init=None):
result = []
forgets = f.split(1, dim=0)
prev_h = hidden_init
for i, h in enumerate((f * x).split(1, dim=0)):
if prev_h is not None:
h = h + (1 - forgets[i]) * prev_h
h = h.view(h.size()[1:])
result.append(h)
prev_h = h
return torch.stack(result)
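# Annotation (not part of the original module): a worked example of the
# recurrence. The helper name `_demo_forget_mult` is ours. With f = 0.5 and
# x = 1 everywhere, h_t = 0.5 + 0.5 * h_{t-1}, so the per-element sequence is
# 0.5, 0.75, 0.875, converging toward 1.
def _demo_forget_mult():
    f = torch.full((3, 1, 2), 0.5)
    x = torch.ones(3, 1, 2)
    return ForgetMult()(f, x)  # shape (3, 1, 2)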
class QRNNLayer(nn.Module):
"""Applies a single layer Quasi-Recurrent Neural Network (QRNN) to an input sequence.
Args:
input_size: The number of expected features in the input x.
hidden_size: The number of features in the hidden state h. If not specified, the input size is used.
        save_prev_x: Whether to store previous inputs for use in future convolutional windows (i.e. for a continuing sequence such as in language modeling). If True, you must call reset to remove cached previous values of x. Default: False.
        window: Defines the size of the convolutional window (how many previous tokens to look at when computing the QRNN values). Supports 1 and 2. Default: 1.
zoneout: Whether to apply zoneout (i.e. failing to update elements in the hidden state) to the hidden state updates. Default: 0.
output_gate: If True, performs QRNN-fo (applying an output gate to the output). If False, performs QRNN-f. Default: True.
Inputs: X, hidden
- X (seq_len, batch, input_size): tensor containing the features of the input sequence.
- hidden (batch, hidden_size): tensor containing the initial hidden state for the QRNN.
Outputs: output, h_n
- output (seq_len, batch, hidden_size): tensor containing the output of the QRNN for each timestep.
- h_n (1, batch, hidden_size): tensor containing the hidden state for t=seq_len
"""
def __init__(self, input_size, hidden_size=None, save_prev_x=False,
zoneout=0, window=1, output_gate=True):
super(QRNNLayer, self).__init__()
assert window in [1, 2
], 'This QRNN implementation currently only handles convolutional window of size 1 or size 2'
self.window = window
self.input_size = input_size
self.hidden_size = hidden_size if hidden_size else input_size
self.zoneout = zoneout
self.save_prev_x = save_prev_x
self.prevX = None
self.output_gate = output_gate
self.linear = nn.Linear(self.window * self.input_size, 3 * self.
hidden_size if self.output_gate else 2 * self.hidden_size)
def reset(self):
self.prevX = None
def forward(self, X, hidden=None):
seq_len, batch_size, _ = X.size()
source = None
if self.window == 1:
source = X
elif self.window == 2:
Xm1 = []
Xm1.append(self.prevX if self.prevX is not None else X[:1, :, :
] * 0)
if len(X) > 1:
Xm1.append(X[:-1, :, :])
Xm1 = torch.cat(Xm1, 0)
source = torch.cat([X, Xm1], 2)
Y = self.linear(source)
if self.output_gate:
Y = Y.view(seq_len, batch_size, 3 * self.hidden_size)
Z, F, O = Y.chunk(3, dim=2)
else:
Y = Y.view(seq_len, batch_size, 2 * self.hidden_size)
Z, F = Y.chunk(2, dim=2)
Z = torch.tanh(Z)
F = torch.sigmoid(F)
if self.zoneout:
if self.training:
mask = F.new_empty(F.size(), requires_grad=False).bernoulli_(
1 - self.zoneout)
F = F * mask
else:
F *= 1 - self.zoneout
C = ForgetMult()(F, Z, hidden)
if self.output_gate:
H = torch.sigmoid(O) * C
else:
H = C
if self.window > 1 and self.save_prev_x:
self.prevX = X[-1:, :, :].detach()
return H, C[-1:, :, :]
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
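# Annotation (not part of the original module): a minimal eager-mode usage
# sketch. The helper name `_demo_qrnn_layer` is ours.
def _demo_qrnn_layer():
    layer = QRNNLayer(input_size=4)
    X = torch.rand(4, 4, 4)       # (seq_len, batch, input_size)
    H, h_n = layer(X)
    return H.shape, h_n.shape     # (4, 4, 4) and (1, 4, 4)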
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.optim import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = tmp7 * tmp3
tl.store(out_ptr0 + x2, tmp3, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
tl.store(out_ptr2 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr1 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp9 = tl.load(in_ptr1 + (32 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp5 = tmp2 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp1 - tmp7
tmp10 = tmp8 * tmp6
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp6, xmask)
tl.store(out_ptr2 + x0, tmp8, xmask)
tl.store(out_ptr3 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused_rsub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp29 = tl.load(in_ptr4 + (8 + x0 + 12 * x1), xmask)
tmp30 = tl.load(in_ptr5 + (8 + x0), xmask, eviction_policy='evict_last')
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr0 + (48 + x0 + 4 * (-12 + x1)), tmp16 & xmask,
other=0.0)
tmp20 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp21 = tl.load(in_ptr2 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp22 = tmp20 * tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp16, tmp23, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tmp28 = tl.where(tmp4, tmp5, tmp27)
tmp31 = tmp29 + tmp30
tmp32 = tl.sigmoid(tmp31)
tmp33 = tmp32 * tmp28
tl.store(out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr1 + x2, tmp32, xmask)
tl.store(out_ptr2 + x2, tmp33, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0[grid(64)](buf0, primals_3, buf1,
buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_1[grid(16)](buf2, buf3, buf4, buf5,
buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_rsub_2[grid(16)](buf2, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_stack_3[grid(64)](buf3, buf5, buf7,
buf8, buf0, primals_3, buf9, buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
del primals_3
return buf11, reinterpret_tensor(buf9, (1, 4, 4), (16, 4, 1), 48
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf1, buf2, reinterpret_tensor(buf3, (4, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf8, buf9, buf10
class ForgetMult(torch.nn.Module):
"""ForgetMult computes a simple recurrent equation:
h_t = f_t * x_t + (1 - f_t) * h_{t-1}
This equation is equivalent to dynamic weighted averaging.
Inputs: X, hidden
- X (seq_len, batch, input_size): tensor containing the features of the input sequence.
- F (seq_len, batch, input_size): tensor containing the forget gate values, assumed in range [0, 1].
- hidden_init (batch, input_size): tensor containing the initial hidden state for the recurrence (h_{t-1}).
"""
def __init__(self):
super(ForgetMult, self).__init__()
def forward(self, f, x, hidden_init=None):
result = []
forgets = f.split(1, dim=0)
prev_h = hidden_init
for i, h in enumerate((f * x).split(1, dim=0)):
if prev_h is not None:
h = h + (1 - forgets[i]) * prev_h
h = h.view(h.size()[1:])
result.append(h)
prev_h = h
return torch.stack(result)
class QRNNLayerNew(nn.Module):
"""Applies a single layer Quasi-Recurrent Neural Network (QRNN) to an input sequence.
Args:
input_size: The number of expected features in the input x.
hidden_size: The number of features in the hidden state h. If not specified, the input size is used.
        save_prev_x: Whether to store previous inputs for use in future convolutional windows (i.e. for a continuing sequence such as in language modeling). If True, you must call reset to remove cached previous values of x. Default: False.
        window: Defines the size of the convolutional window (how many previous tokens to look at when computing the QRNN values). Supports 1 and 2. Default: 1.
zoneout: Whether to apply zoneout (i.e. failing to update elements in the hidden state) to the hidden state updates. Default: 0.
output_gate: If True, performs QRNN-fo (applying an output gate to the output). If False, performs QRNN-f. Default: True.
Inputs: X, hidden
- X (seq_len, batch, input_size): tensor containing the features of the input sequence.
- hidden (batch, hidden_size): tensor containing the initial hidden state for the QRNN.
Outputs: output, h_n
- output (seq_len, batch, hidden_size): tensor containing the output of the QRNN for each timestep.
- h_n (1, batch, hidden_size): tensor containing the hidden state for t=seq_len
"""
def __init__(self, input_size, hidden_size=None, save_prev_x=False,
zoneout=0, window=1, output_gate=True):
super(QRNNLayerNew, self).__init__()
assert window in [1, 2
], 'This QRNN implementation currently only handles convolutional window of size 1 or size 2'
self.window = window
self.input_size = input_size
self.hidden_size = hidden_size if hidden_size else input_size
self.zoneout = zoneout
self.save_prev_x = save_prev_x
self.prevX = None
self.output_gate = output_gate
self.linear = nn.Linear(self.window * self.input_size, 3 * self.
hidden_size if self.output_gate else 2 * self.hidden_size)
def reset(self):
self.prevX = None
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
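# Annotation (not part of the generated output): a minimal usage sketch. The
# helper name `_demo_qrnn_layer_new` is ours; it assumes a CUDA device and the
# (4, 4, 4) input shape the compiled call() is specialized to (window=1,
# zoneout=0, output_gate=True).
def _demo_qrnn_layer_new():
    layer = QRNNLayerNew(input_size=4).cuda()
    X = torch.rand(4, 4, 4, device='cuda')
    H, h_n = layer(X)
    return H.shape, h_n.shape     # (4, 4, 4) and (1, 4, 4)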
| boshining/NeuronBlocks | QRNNLayer | false | 14,990 | [
"MIT"
]
| 1,257 | 74fbb8658fb3f1cffea5c9bc84b2a1da59c20dd9 | https://github.com/boshining/NeuronBlocks/tree/74fbb8658fb3f1cffea5c9bc84b2a1da59c20dd9 |
DiffLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/a2/ca2bypdozo4vzjrvjjoirhpfgvzfuin2hjpbbvp4ycptip36csey.py
# Topologically Sorted Source Nodes: [input1_mean, input1_1, norm, add, input1_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# input1_1 => sub
# input1_l2 => div
# input1_mean => mean
# norm => pow_1, sum_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + r1), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + r1), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + r1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-06
tmp18 = tmp16 + tmp17
tmp19 = tmp10 / tmp18
tl.store(out_ptr2 + (r1 + (64*x0)), tmp19, xmask)
''', device_str='cuda')
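# In eager terms, the fused kernel above performs, for the flattened (4, 64) input:
# x = x - x.mean(dim=0, keepdim=True) followed by
# x = x / (x.norm(p=2, dim=1, keepdim=True) + 1e-06),
# i.e. batch-centering plus row-wise L2 normalization in a single pass.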
# kernel path: runs/run_shard_0/inductor_cache/e2/ce27barrmkjhojql257rnzuamyefwb737kzvhkz47ba5fumvkj2b.py
# Topologically Sorted Source Nodes: [pow_1, diff_loss], Original ATen: [aten.pow, aten.mean]
# Source node to ATen node mapping:
# diff_loss => mean_2
# pow_1 => pow_5
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mm, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_5,), kwargs = {})
triton_red_fused_mean_pow_1 = async_compile.triton('triton_red_fused_mean_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mean_pow_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input1_mean, input1_1, norm, add, input1_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0.run(arg0_1, buf4, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input2_mean, input2_1, norm_1, add_1, input2_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div]
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0.run(arg1_1, buf5, 4, 64, grid=grid(4), stream=stream0)
del arg1_1
buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, input2_l2, mm], Original ATen: [aten.add, aten.div, aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (1, 64), 0), buf5, out=buf6)
del buf4
del buf5
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [pow_1, diff_loss], Original ATen: [aten.pow, aten.mean]
triton_red_fused_mean_pow_1.run(buf8, buf6, 1, 4096, grid=grid(1), stream=stream0)
del buf6
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class DiffLoss(nn.Module):
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, input1, input2):
batch_size = input1.size(0)
input1 = input1.view(batch_size, -1)
input2 = input2.view(batch_size, -1)
input1_mean = torch.mean(input1, dim=0, keepdim=True)
input2_mean = torch.mean(input2, dim=0, keepdim=True)
input1 = input1 - input1_mean
input2 = input2 - input2_mean
input1_l2_norm = torch.norm(input1, p=2, dim=1, keepdim=True).detach()
input1_l2 = input1.div(input1_l2_norm.expand_as(input1) + 1e-06)
input2_l2_norm = torch.norm(input2, p=2, dim=1, keepdim=True).detach()
input2_l2 = input2.div(input2_l2_norm.expand_as(input2) + 1e-06)
diff_loss = torch.mean(input1_l2.t().mm(input2_l2).pow(2))
return diff_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
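# Illustrative usage sketch (an assumption, not part of the original entry): the
# loss is the mean squared entry of the 64x64 cross-correlation matrix between
# the centered, L2-normalized flattened inputs, so it penalizes overlap between
# the two feature spaces.
def _demo_diff_loss():
    loss_fn = DiffLoss()
    a, b = get_inputs()
    return loss_fn(a, b).item()  # non-negative scalar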
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mean_sub_0(in_ptr0,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + r1), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + r1), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + r1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-06
tmp18 = tmp16 + tmp17
tmp19 = tmp10 / tmp18
tl.store(out_ptr2 + (r1 + 64 * x0), tmp19, xmask)
@triton.jit
def triton_red_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
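# In eager terms, the reduction above is torch.mean(m.pow(2)) for the (64, 64)
# matrix m passed as in_ptr0: squares are accumulated blockwise over all 4096
# elements, then divided by 4096.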
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg0_1,
buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg1_1,
buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (1, 64), 0),
buf5, out=buf6)
del buf4
del buf5
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7
del buf7
triton_red_fused_mean_pow_1[grid(1)](buf8, buf6, 1, 4096, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del buf6
return buf8,
class DiffLossNew(nn.Module):
def __init__(self):
super(DiffLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
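# Hedged usage sketch (assumes a CUDA device; not part of the original entry):
# inputs must match the contiguous (4, 4, 4, 4) shape asserted in call().
def _demo_diff_loss_compiled():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    return DiffLossNew()(a, b)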
| byamao1/MMSA | DiffLoss | false | 14,991 | [
"MIT"
]
| 198 | 1a894d042144c9ac75b3465d38871ce8c2987251 | https://github.com/byamao1/MMSA/tree/1a894d042144c9ac75b3465d38871ce8c2987251 |
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qa/cqazar4hg4rdjbxm7zr5mix2w3dkhfmvvjksn7c6lktr5yfe6ndy.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c2/cc2wsialcqiknwetscnqy3fzaqmmib3cxfb7tsfjx7hdlsxbdq7s.py
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# attn_weights_4 => div_1
# sum_1 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_17, [1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
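# In eager terms, the kernel above computes
# attn_weights.view(4, 4, 4, 16).sum(dim=1) / 4: the four per-head attention
# maps are folded back together by averaging over the head dimension (the 0.25
# multiply) in a single pass.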
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf8, buf9, 4, 16, grid=grid(4, 16), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_3.run(buf7, buf11, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.utils.checkpoint
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
`attn_mask` argument. Note that unlike some variants of multi-head attention,
this forward does not accept a `key_padding_mask`; masking must go through
`attn_mask`.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
src_len = k.size(1)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
try:
attn_weights += attn_mask.unsqueeze(0)
except Exception:
# The additive mask could not be broadcast onto the attention weights;
# fail loudly rather than silently dropping the mask.
assert False
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.attn_dropout,
training=self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
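# Illustrative usage sketch (an assumption, not part of the original entry):
# a call with shared key/value storage (the kv_same path), runnable on CPU.
def _demo_multihead_attention():
    mha = MultiheadAttention(embed_dim=4, num_heads=4)
    q = torch.rand(4, 4, 4)    # (tgt_len, batch, embed_dim)
    kv = torch.rand(16, 4, 4)  # (src_len, batch, embed_dim)
    attn, attn_weights = mha(q, kv, kv)
    return attn.shape, attn_weights.shape  # (4, 4, 4) and (4, 4, 16)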
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
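# Hedged usage sketch (assumes a CUDA device; not part of the original entry):
# shapes follow the strides asserted in call() above.
def _demo_multihead_attention_compiled():
    mha = MultiheadAttentionNew(embed_dim=4, num_heads=4).cuda()
    q = torch.rand(4, 4, 4, device='cuda')
    k = torch.rand(4, 4, 4, 4, device='cuda')
    v = torch.rand(4, 4, 4, 4, device='cuda')
    return mha(q, k, v)  # (attn, averaged attention weights)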
| byamao1/MMSA | MultiheadAttention | false | 14,992 | [
"MIT"
]
| 198 | 1a894d042144c9ac75b3465d38871ce8c2987251 | https://github.com/byamao1/MMSA/tree/1a894d042144c9ac75b3465d38871ce8c2987251 |
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oe/coeqskvavjig2xfptqbr47z2ukzhyagx4dxozc4kzjwb2ykujlu4.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt, mul, where
# Graph fragment:
# %add_tensor_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_3), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_3, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_3, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_3, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uo/cuorclbf4h3pyyjppsftkziufkt34vxjph7yux3ly7ujjfqkt7ad.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_1, mul_1, where_1
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_5), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_2, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mf/cmfwsgrlk36ejtu74rx7ipaja6rfirwmwjgqhdqter22spc6ajam.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_5 => gt_2, mul_2, where_2
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.2), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_tensor_1, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rh/crhvyy3w3uejbzndu7qftnyc25sndrfzlmb3i2bzpyadobz7z7bm.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
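# Illustrative reference (added sketch): the in-place kernel above fuses the
# final bias add with the sigmoid, i.e. buf <- sigmoid(buf + bias[0]).
def _sigmoid_fusion_reference(logits, bias):
    return torch.sigmoid(logits + bias[0])   # bias has shape (1,): scalar broadcast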
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024, ), (1, ))
assert_size_stride(primals_4, (512, 1024), (1024, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (256, 512), (512, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (1, 256), (256, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 4096, grid=grid(4096), stream=stream0)
del buf0
del primals_3
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.leaky_relu, aten.native_dropout]
buf3 = torch.ops.aten.native_dropout.default(buf2, 0.3, True)
del buf2
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1024, 512), (1, 1024), 0), out=buf6)
buf7 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf6, primals_5, buf7, buf8, 2048, grid=grid(2048), stream=stream0)
del buf6
del primals_5
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.leaky_relu, aten.native_dropout]
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.3, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf12)
buf13 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf14 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf12, primals_7, buf13, buf14, 1024, grid=grid(1024), stream=stream0)
del buf12
del primals_7
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.leaky_relu, aten.native_dropout]
buf15 = torch.ops.aten.native_dropout.default(buf14, 0.3, True)
del buf14
buf16 = buf15[0]
buf17 = buf15[1]
del buf15
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (256, 1), (1, 256), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf19, primals_9, 4, grid=grid(4), stream=stream0)
del primals_9
return (buf19, primals_1, buf1, buf4, buf5, buf7, buf10, buf11, buf13, buf16, buf17, buf19, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
class Discriminator(nn.Module):
def __init__(self, img_shape, hidden_dim=1024):
super().__init__()
in_dim = int(np.prod(img_shape))
self.fc1 = nn.Linear(in_dim, hidden_dim)
self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2)
self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2)
self.fc4 = nn.Linear(self.fc3.out_features, 1)
def forward(self, img):
x = img.view(img.size(0), -1)
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.dropout(x, 0.3)
x = F.leaky_relu(self.fc2(x), 0.2)
x = F.dropout(x, 0.3)
x = F.leaky_relu(self.fc3(x), 0.2)
x = F.dropout(x, 0.3)
return torch.sigmoid(self.fc4(x))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'img_shape': 4}]
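def _demo_discriminator():
    # Hypothetical smoke test (added sketch, not from the source repo):
    # img_shape=4 gives in_dim=4, and forward() flattens its input, so a
    # (B, 4) batch works directly.
    model = Discriminator(img_shape=4)
    probs = model(torch.rand(4, 4))
    assert probs.shape == (4, 1)   # sigmoid output, values in (0, 1)
    return probs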
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (512, 1024), (1024, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (256, 512), (512, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (1, 256), (256, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024
), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(4096)](buf0, primals_3, buf1,
buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_3
buf3 = torch.ops.aten.native_dropout.default(buf2, 0.3, True)
del buf2
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1024, 512),
(1, 1024), 0), out=buf6)
buf7 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(2048)](buf6, primals_5, buf7,
buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_5
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.3, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (512, 256),
(1, 512), 0), out=buf12)
buf13 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf14 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(1024)](buf12, primals_7, buf13,
buf14, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf12
del primals_7
buf15 = torch.ops.aten.native_dropout.default(buf14, 0.3, True)
del buf14
buf16 = buf15[0]
buf17 = buf15[1]
del buf15
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (256, 1), (1,
256), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_sigmoid_3[grid(4)](buf19, primals_9, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_9
return (buf19, primals_1, buf1, buf4, buf5, buf7, buf10, buf11, buf13,
buf16, buf17, buf19, primals_8, primals_6, primals_4)
class DiscriminatorNew(nn.Module):
def __init__(self, img_shape, hidden_dim=1024):
super().__init__()
in_dim = int(np.prod(img_shape))
self.fc1 = nn.Linear(in_dim, hidden_dim)
self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2)
self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2)
self.fc4 = nn.Linear(self.fc3.out_features, 1)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| bzrry/lightning-bolts | Discriminator | false | 14993 | ["Apache-2.0"] | 822 | bd392ad858039290c72c20cc3f10df39384e90b9 | https://github.com/bzrry/lightning-bolts/tree/bd392ad858039290c72c20cc3f10df39384e90b9 |
SchedulerTestNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ej/cejq4hvgtngsrt5ywcfdheadnaab2ydhtz352v7vusjumjik42f2.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4b/c4bm44vlkroihtm4ebt4iyykoeyhr2cwl6horqusip6sdixccmyf.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16384, grid=grid(16384), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 16384, grid=grid(16384), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
class SchedulerTestNet(torch.nn.Module):
"""adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py."""
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
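def _demo_scheduler_test_net():
    # Hypothetical smoke test (added sketch): the two 1x1 convolutions
    # preserve the (4, 1, 64, 64) input shape.
    net = SchedulerTestNet()
    out = net(torch.rand(4, 1, 64, 64))
    assert out.shape == (4, 1, 64, 64)
    return out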
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, None)
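# Illustrative reference (added sketch, not emitted by Inductor): the kernel
# above adds the scalar conv bias and applies ReLU in place on the extern
# convolution's output buffer.
def _conv_relu_fusion_reference(conv_out, bias):
    return torch.relu(conv_out + bias[0])   # bias has shape (1,): scalar broadcast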
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(16384)](buf3, primals_5, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class SchedulerTestNetNew(torch.nn.Module):
"""adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py."""
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bzrry/lightning-bolts | SchedulerTestNet | false | 14994 | ["Apache-2.0"] | 822 | bd392ad858039290c72c20cc3f10df39384e90b9 | https://github.com/bzrry/lightning-bolts/tree/bd392ad858039290c72c20cc3f10df39384e90b9 |
SELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ik/cikb25u5x5667mb6rnbleob7teijkbt54zawgiyjor4rhenfpsjm.py
# Topologically Sorted Source Nodes: [mse_loss, sum_1], Original ATen: [aten.mse_loss, aten.sum]
# Source node to ATen node mapping:
# mse_loss => pow_1, sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
triton_poi_fused_mse_loss_sum_0 = async_compile.triton('triton_poi_fused_mse_loss_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mse_loss_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mse_loss_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
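# Illustrative reference (added sketch): the kernel above unrolls the 4-way
# reduction over dim 1 -- for each of the 64 outputs it loads the four slices
# at offsets 0/16/32/48, squares the differences and accumulates:
def _mse_sum_reference(a, b):
    return ((a - b) ** 2).sum(dim=1)   # (4, 4, 4, 4) -> (4, 4, 4)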
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mse_loss, sum_1], Original ATen: [aten.mse_loss, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mse_loss_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class SELoss(nn.MSELoss):
def __init__(self):
super().__init__(reduction='none')
def forward(self, inputs: 'Tensor', target: 'Tensor') ->Tensor:
return super().forward(inputs, target).sum(1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
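def _demo_se_loss():
    # Hypothetical check (added sketch): reduction='none' keeps elementwise
    # squared errors, and .sum(1) collapses only dim 1: (4,4,4,4) -> (4,4,4).
    a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    out = SELoss()(a, b)
    assert torch.allclose(out, ((a - b) ** 2).sum(1))
    return out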
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mse_loss_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mse_loss_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SELossNew(nn.MSELoss):
def __init__(self):
super().__init__(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bzrry/lightning-bolts | SELoss | false | 14995 | ["Apache-2.0"] | 822 | bd392ad858039290c72c20cc3f10df39384e90b9 | https://github.com/bzrry/lightning-bolts/tree/bd392ad858039290c72c20cc3f10df39384e90b9 |
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lv/clvm7udsuucc354cwdidyuj7lng3dp4x6vt7xo67dgpdpp7rjyls.py
# Topologically Sorted Source Nodes: [bilinear, sigmoid], Original ATen: [aten.add, aten.sigmoid]
# Source node to ATen node mapping:
# bilinear => add
# sigmoid => sigmoid
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_4), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze,), kwargs = {})
triton_poi_fused_add_sigmoid_0 = async_compile.triton('triton_poi_fused_add_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
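# Illustrative reference (added sketch): only the bias add + sigmoid live in
# the kernel above; the bilinear form itself runs through aten._trilinear.
# Together they compute sigmoid(sum_ij x1_i * W[o,i,j] * x2_j + b[o]) per row:
def _bilinear_sigmoid_reference(x1, W, x2, bias):
    return torch.sigmoid(torch.einsum('bi,oij,bj->bo', x1, W, x2) + bias)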
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear]
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf1 = buf0
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [bilinear, sigmoid], Original ATen: [aten.add, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_add_sigmoid_0.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
return (buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, n_in, n_out):
super(Discriminator, self).__init__()
self.f_k = nn.Bilinear(n_in, n_out, 1)
self.sigm = nn.Sigmoid()
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, S, node, s_bias=None):
S = S.expand_as(node)
score = torch.squeeze(self.f_k(node, S), 1)
if s_bias is not None:
score += s_bias
return self.sigm(score)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
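def _demo_discriminator():
    # Hypothetical smoke test (added sketch): with (4, 4, 4, 4) inputs,
    # torch.squeeze(score, 1) is a no-op (dim 1 has size 4), so the output
    # keeps shape (4, 4, 4, 1).
    disc = Discriminator(n_in=4, n_out=4)
    score = disc(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert score.shape == (4, 4, 4, 1)
    return score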
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_2, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor(
primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf1 = buf0
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_add_sigmoid_0[grid(64)](buf2, primals_4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2
class DiscriminatorNew(nn.Module):
def __init__(self, n_in, n_out):
super(DiscriminatorNew, self).__init__()
self.f_k = nn.Bilinear(n_in, n_out, 1)
self.sigm = nn.Sigmoid()
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, input_0, input_1):
primals_3 = self.f_k.weight
primals_4 = self.f_k.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| caojiangxia/BiGI | Discriminator | false | 14996 | ["MIT"] | 57 | ed54c20523a5b3f295b90a9c08f7c54e8258d04a | https://github.com/caojiangxia/BiGI/tree/ed54c20523a5b3f295b90a9c08f7c54e8258d04a |
GCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dm/cdmosrgisj2hljbfjatuyiee3tavwn5xcqrl5iwrqwfvea6ol4db.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# add => add
# x => gt, mul, where
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_4), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {})
triton_poi_fused_add_leaky_relu_0 = async_compile.triton('triton_poi_fused_add_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 4.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
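# Illustrative reference (added sketch): note the constant 4.0 above -- the
# model is built with nn.LeakyReLU(alpha) and alpha=4 from get_init_inputs(),
# so the negative slope is 4 rather than a conventional 0.01 or 0.2:
def _add_leaky_relu_reference(x, bias):
    t = x + bias
    return t > 0, torch.where(t > 0, t, 4.0 * t)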
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_leaky_relu_0.run(buf1, primals_4, buf2, buf3, 16, grid=grid(16), stream=stream0)
del buf1
del primals_4
return (buf3, buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
class GraphConvolution(Module):
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features)
)
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.dropout = dropout
self.leakyrelu = nn.LeakyReLU(alpha)
def forward(self, x, adj):
x = self.leakyrelu(self.gc1(x, adj))
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4}]
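def _demo_gcn():
    # Hypothetical smoke test (added sketch) with a dense 4x4 adjacency,
    # matching get_inputs(): output is LeakyReLU(adj @ (x @ W) + b).
    gcn = GCN(nfeat=4, nhid=4, dropout=0.5, alpha=4)
    h = gcn(torch.rand(4, 4), torch.rand(4, 4))
    assert h.shape == (4, 4)
    return h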
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 4.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, primals_4, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del primals_4
return buf3, buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class GraphConvolution(Module):
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features)
)
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCNNew(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha):
super(GCNNew, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.dropout = dropout
self.leakyrelu = nn.LeakyReLU(alpha)
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc1.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| caojiangxia/BiGI | GCN | false | 14997 | ["MIT"] | 57 | ed54c20523a5b3f295b90a9c08f7c54e8258d04a | https://github.com/caojiangxia/BiGI/tree/ed54c20523a5b3f295b90a9c08f7c54e8258d04a |
Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
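# Illustrative reference (added sketch): this first kernel only produces the
# per-row statistics -- mean and rsqrt(var + 1e-05) with biased variance --
# that the second layer-norm kernel below consumes:
def _layer_norm_stats_reference(x):
    mean = x.mean(dim=-1, keepdim=True)
    rstd = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + 1e-05)
    return mean, rstd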
# kernel path: runs/run_shard_0/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
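# Hedged reference (assumption): the second kernel consumes those stats and
# applies the affine LayerNorm transform, broadcasting weight and bias
# (primals_1, primals_2) over the last dimension.
def _ref_layer_norm_apply(x, mean, rstd, weight, bias):
    return (x - mean) * rstd * weight + bias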
# kernel path: runs/run_shard_0/inductor_cache/3r/c3rfy3ljjc2bfodnr5gm65jr7ew6v6kno6w6jzahlupuqxbpvfkw.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
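# Hedged note (assumption): the clone kernels slice the packed qkv projection
# (buf3, shape (16, 12) = 3 * dim per row) into contiguous per-head tensors:
# row offset 0 is q (clone_2), offset 4 is k (clone_3), offset 8 is v (clone_6).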
# kernel path: runs/run_shard_0/inductor_cache/p5/cp5wuljbdcz2dl2xvl4imkn5wmtmrnbb7mnld5glztiqavldlheh.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
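# Hedged reference (assumption): _softmax_4 and _softmax_5 together implement a
# numerically stable softmax over the last dim: subtract the row max, take exp,
# then normalize by the row sum. The literal 1.0 multiplier is the attention
# scale (dim // num_heads) ** -0.5, which equals 1.0 for dim=4, num_heads=4.
def _ref_stable_softmax(attn):
    shifted = attn - attn.amax(dim=-1, keepdim=True)
    e = shifted.exp()
    return e / e.sum(dim=-1, keepdim=True)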
# kernel path: runs/run_shard_0/inductor_cache/vv/cvvhis67uzj3m3ebbd4sgghaemqhihabasphltk5wytqdd6fe74t.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
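# Hedged note (assumption): clone_7 realizes the (attn @ v).transpose(1, 2)
# .reshape(B, N, C) step, merging the per-head outputs back into channels.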
# kernel path: runs/run_shard_0/inductor_cache/5y/c5yhyv7emyc7i2ozpvns6tsiqcvdzktqqpohy4sedfe7aihkojch.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
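# Hedged note (assumption): unlike the first stats kernel, this one stores the
# raw variance (tmp40) without eps or rsqrt; kernel ..._9 below finishes with
# rsqrt(var + 1e-05) inline.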
# kernel path: runs/run_shard_0/inductor_cache/xj/cxjpr2ute76xkk7edg7qlvolks2ggx2xwbrttteralhmvd2xsktw.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {})
triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
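# Hedged reference (assumption): an eager equivalent of the fused residual +
# LayerNorm pair (kernels _8 and _9 above), with proj_bias = primals_6.
def _ref_residual_layer_norm(x, attn_out, proj_bias, weight, bias):
    h = x + (attn_out + proj_bias)
    mean = h.mean(dim=-1, keepdim=True)
    var = h.var(dim=-1, unbiased=False, keepdim=True)
    return (h - mean) * torch.rsqrt(var + 1e-05) * weight + bias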
# kernel path: runs/run_shard_0/inductor_cache/jr/cjr3mjuwemgklde3zy7lrqxunugyqui3f2hy6zj6ufkp5mfj3lry.py
# Topologically Sorted Source Nodes: [mul_1, wrapped_sqrt, pow_1, mul_2, add_1, mul_3, tanh, add_2, x_5], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_6
# add_2 => add_7
# mul_1 => mul_5
# mul_2 => mul_6
# mul_3 => mul_7
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# x_5 => mul_8
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_13, 3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_13, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add_6), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_7,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_7), kwargs = {})
triton_poi_fused_add_mul_pow_sqrt_tanh_10 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
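# Hedged reference (assumption): the kernel above is the tanh GELU approximation
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))); the literal
# 0.7978845608028654 is sqrt(2/pi).
def _ref_gelu_tanh(x):
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * (x + 0.044715 * x.pow(3))))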
# kernel path: runs/run_shard_0/inductor_cache/pu/cpuql3oz4hmaygynopg7lq7xhfiv7hr7pr4vyzhfpmw34jymdp7q.py
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add_2
# x_3 => add_3
# x_9 => add_8
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
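# Hedged note (assumption): triton_poi_fused_add_11 folds both residual branches
# of Block.forward in place: out = x + (attn_out + proj_bias) + (mlp_out +
# fc2_bias), with the biases (primals_6, primals_12) broadcast over channels.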
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, ), (1, ))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, wrapped_sqrt, pow_1, mul_2, add_1, mul_3, tanh, add_2, x_5], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
triton_poi_fused_add_mul_pow_sqrt_tanh_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0)
del primals_12
return (buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, )
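# Hedged note (assumption): buf19 is the Block forward output (shape (4, 4, 4));
# the remaining tensors returned by call() are activations saved for backward.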
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
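# Hedged demo (assumption, helper name hypothetical): drop_path is stochastic
# depth -- in training, each sample's branch is zeroed with probability
# drop_prob and survivors are rescaled by 1/keep_prob so the expectation is
# unchanged; in eval mode it is the identity.
def _demo_drop_path():
    x = torch.ones(4, 3)
    return drop_path(x, drop_prob=0.5, training=True)  # rows are all 0.0 or 2.0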
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
            torch.pow(x, 3))))
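# Hedged check (assumption, helper name hypothetical): this matches PyTorch's
# built-in tanh approximation, nn.GELU(approximate='tanh') (PyTorch >= 1.12),
# up to float rounding.
def _check_gelu_matches_builtin():
    x = torch.randn(8)
    return torch.allclose(GELU()(x), nn.GELU(approximate='tanh')(x), atol=1e-06)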
class DropPath(nn.Module):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0,
proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
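# Hedged usage sketch (assumption): with dim=4 and num_heads=4 each head has
# head_dim=1, so scale = 1 ** -0.5 == 1.0 -- which is why the fused softmax
# kernels above multiply by a literal 1.0.
def _demo_attention():
    attn = Attention(dim=4, num_heads=4)
    return attn(torch.rand(4, 4, 4))  # shape preserved: (B, N, C) = (4, 4, 4)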
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=
0.0, attn_drop=0.0, drop_path=0.0, act_layer=GELU, norm_layer=nn.
LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio
), act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
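# Hedged demo (assumption, helper name hypothetical): wiring get_init_inputs()
# and get_inputs() into Block reconstructs the eager module this graph traces.
def _demo_block():
    init_args, init_kwargs = get_init_inputs()
    block = Block(*init_args, **init_kwargs)
    x, = get_inputs()
    return block(x)  # shape (4, 4, 4)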
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_10(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_add_mul_pow_sqrt_tanh_10[grid(256)](buf16, buf17,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
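# drop_path implements stochastic depth: each sample in the batch is zeroed
# with probability drop_prob, and survivors are rescaled by 1/keep_prob so the
# expectation is preserved, E[x / keep_prob * Bernoulli(keep_prob)] = x.
# A minimal, illustrative check (shapes here are assumptions, not taken from
# the original test suite):
#
#     x = torch.ones(10000, 8)
#     out = drop_path(x, drop_prob=0.5, training=True)
#     assert abs(out.mean().item() - 1.0) < 0.1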
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
    def forward(self, x):
        # tanh approximation of GELU (torch.tanh replaces the deprecated F.tanh)
        return 0.5 * x * (1 + torch.tanh(np.sqrt(2 / np.pi) * (x +
            0.044715 * torch.pow(x, 3))))
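# The class above is the tanh approximation of GELU. On PyTorch >= 1.12 (an
# assumption about the available API) it should agree with the built-in
# approximation to floating-point tolerance:
#
#     x = torch.randn(16)
#     torch.allclose(GELU()(x), F.gelu(x, approximate='tanh'), atol=1e-6)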
class DropPath(nn.Module):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0,
proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
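# Shape walk-through for the qkv projection above (B=batch, N=tokens, C=dim):
#     self.qkv(x)              -> (B, N, 3 * C)
#     .reshape(...)            -> (B, N, 3, num_heads, C // num_heads)
#     .permute(2, 0, 3, 1, 4)  -> (3, B, num_heads, N, head_dim)
# so q, k and v are each (B, num_heads, N, head_dim), and attn has shape
# (B, num_heads, N, N) before the softmax over the last dimension.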
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class BlockNew(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=GELU,
        norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer, drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.qkv.weight
primals_5 = self.attn.proj.weight
primals_6 = self.attn.proj.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.mlp.fc1.weight
primals_10 = self.mlp.fc1.bias
primals_11 = self.mlp.fc2.weight
primals_12 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| bubbliiiing/classification-pytorch | Block | false | 14,998 | ["MIT"] | 88 | ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 | https://github.com/bubbliiiing/classification-pytorch/tree/ee62c05bd3094c3fab48bada5a57cb2ed8b61c11 |
Ln_distance | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/by/cby5kj263blyqgiihr7dzulbjsup7h2tnto4rdtait5s4uy5oywr.py
# Topologically Sorted Source Nodes: [d, abs_1, pow_1, sum_1, pow_2], Original ATen: [aten.sub, aten.abs, aten.pow, aten.sum]
# Source node to ATen node mapping:
# abs_1 => abs_1
# d => sub
# pow_1 => pow_1
# pow_2 => pow_2
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.25), kwargs = {})
triton_per_fused_abs_pow_sub_sum_0 = async_compile.triton('triton_per_fused_abs_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 0.25
tmp11 = libdevice.pow(tmp9, tmp10)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
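# Persistent-reduction layout: each program instance handles one of the four
# batch rows (xnumel=4) and keeps that row's 64 elements in registers
# (RBLOCK=64), so |x - y|**4 is summed and the final 0.25 power applied in a
# single kernel launch.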
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [d, abs_1, pow_1, sum_1, pow_2], Original ATen: [aten.sub, aten.abs, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
class Ln_distance(nn.Module):
"""If dims is None Compute across all dimensions except first"""
def __init__(self, n, dim=None):
super(Ln_distance, self).__init__()
self.n = n
self.dim = dim
    def forward(self, x, y):
        d = x - y
        # Use a local dim so repeated calls with differently shaped inputs do
        # not pick up a stale dimension list cached on the module.
        dim = self.dim if self.dim is not None else list(range(1, len(d.shape)))
        return torch.abs(d).pow(self.n).sum(dim=dim).pow(1.0 / float(self.n))
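# For n=4 and 4-D inputs, Ln_distance is the per-sample L4 norm of the
# difference; a quick equivalence sketch (variable names are illustrative):
#
#     x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
#     ref = torch.linalg.vector_norm((x - y).flatten(1), ord=4, dim=1)
#     torch.allclose(Ln_distance(4)(x, y), ref)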
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 0.25
tmp11 = libdevice.pow(tmp9, tmp10)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_pow_sub_sum_0[grid(4)](buf1, arg0_1, arg1_1, 4,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class Ln_distanceNew(nn.Module):
"""If dims is None Compute across all dimensions except first"""
def __init__(self, n, dim=None):
super(Ln_distanceNew, self).__init__()
self.n = n
self.dim = dim
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| carla-recourse/CARLA | Ln_distance | false | 15,000 | ["MIT"] | 140 | e9bb3152598a94e700c38d7377825054959dcf48 | https://github.com/carla-recourse/CARLA/tree/e9bb3152598a94e700c38d7377825054959dcf48 |
CombinedTargetMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3f/c3fha6hwigp5qdkirvgzpdtvtztnza4ys4zevam5i2owamrhkdzx.py
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
# Source node to ATen node mapping:
# heatmap_gt_1 => mul_1
# heatmap_pred_1 => mul
# loss => add
# loss_1 => add_1
# loss_2 => add_2
# mse_loss => mean, pow_1, sub
# mse_loss_1 => mean_1, pow_2, sub_1
# mse_loss_2 => mean_2, pow_3, sub_2
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %select), kwargs = {})
# %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %select_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_8), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {})
triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tmp42 = tmp41 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp42, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0.run(buf3, arg0_1, arg2_1, arg1_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CombinedTargetMSELoss(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_channels = output.size(1)
        heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split(1, 1)
        heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1)
loss = 0.0
num_joints = num_channels // 3
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx * 3].squeeze()
heatmap_gt = heatmaps_gt[idx * 3].squeeze()
offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()
offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()
offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()
offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()
if self.use_target_weight:
heatmap_pred = heatmap_pred * target_weight[:, idx]
heatmap_gt = heatmap_gt * target_weight[:, idx]
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,
heatmap_gt * offset_x_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,
heatmap_gt * offset_y_gt)
return loss / num_joints * self.loss_weight
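# Channel layout assumed by the loss above: channels come in groups of three
# per joint, [response heatmap, x offset, y offset], hence num_joints =
# num_channels // 3, and both offset terms are gated by the ground-truth
# heatmap before the MSE. target_weight is indexed as target_weight[:, idx],
# i.e. one weight column per joint.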
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'use_target_weight': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tmp42 = tmp41 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf3, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class CombinedTargetMSELossNew(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| carolchenyx/mmpose | CombinedTargetMSELoss | false | 15,001 | ["Apache-2.0"] | 367 | cd74bf1d0b13954188cc678415fd0ef98a74b46b | https://github.com/carolchenyx/mmpose/tree/cd74bf1d0b13954188cc678415fd0ef98a74b46b |
Square | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xw/cxw7ds6vzofd5wrsg64nof7fgm4mm2ikktzhwb255ex5w5gucy4o.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Square(nn.Module):
def __init__(self):
super(Square, self).__init__()
def forward(self, x):
return torch.mul(x, x)
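# torch.mul(x, x) lowers to the single elementwise aten.mul kernel shown
# above; torch.square(x) or x ** 2 would presumably lower to an equivalent
# pointwise kernel (via aten.pow) under the same inductor pipeline.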
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SquareNew(nn.Module):
def __init__(self):
super(SquareNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| carlzhangweiwen/gazelle_mpc | Square | false | 15,002 | ["MIT"] | 50 | 45818ccf6375100a8fe2680f44f37d713380aa5c | https://github.com/carlzhangweiwen/gazelle_mpc/tree/45818ccf6375100a8fe2680f44f37d713380aa5c |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4b/c4bcug5vkha2hj57viumhdilkslvdcnorocrxpnqzqpt6x767j3b.py
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# value_1 => mul
# value_3 => exp, sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %mm), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_mul_0 = async_compile.triton('triton_poi_fused__softmax_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp3
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp3
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp8 - tmp19
tmp25 = tmp24 * tmp21
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp23 + tmp26
tmp28 = tmp13 - tmp19
tmp29 = tmp28 * tmp21
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp27 + tmp30
tmp32 = tmp18 - tmp19
tmp33 = tmp32 * tmp21
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp31 + tmp34
tl.store(out_ptr0 + (x0), tmp19, xmask)
tl.store(out_ptr1 + (x0), tmp35, xmask)
''', device_str='cuda')
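# The kernel above produces the row-wise max and the sum of exponentials in a
# single pass; the companion kernel below reuses both to form the numerically
# stable softmax exp((x - max) * 0.5) / sum, where the 0.5 factor is the
# folded 1/sqrt(hidden_dim) scaling (hidden_dim = 4 in this trace).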
# kernel path: runs/run_shard_0/inductor_cache/xa/cxa3a5qoeofgxax64qpycbkpkrqitogqnqea3gnacj5tx7rblerz.py
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# value_1 => mul
# value_3 => div_1, exp
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %mm), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_mul_1 = async_compile.triton('triton_poi_fused__softmax_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [user], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [item], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_mul_0.run(primals_7, buf2, buf3, buf4, 4, grid=grid(4), stream=stream0)
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_1.run(buf5, primals_7, buf3, buf4, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(buf0, buf5, buf1, alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [value_4], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf7)
buf8 = buf4; del buf4 # reuse
buf9 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [value_5, value_7], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_0.run(primals_8, buf7, buf8, buf9, 4, grid=grid(4), stream=stream0)
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [value_5, value_7], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_1.run(buf10, primals_8, buf8, buf9, 16, grid=grid(16), stream=stream0)
del buf8
del buf9
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(buf1, buf10, buf0, alpha=1, beta=1, out=buf11)
return (buf6, buf11, primals_3, primals_6, primals_7, primals_8, buf0, buf1, buf5, buf10, )
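# Note how both residual additions (softmax(scores) @ key + query) are folded
# into the GEMMs: the two addmm calls above pass buf0 / buf1 as the beta=1
# accumulator, so no separate elementwise add kernel is launched.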
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.lin_u = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.lin_v = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.opt = opt
def forward(self, user, item, UV_adj, VU_adj):
user = self.lin_u(user)
item = self.lin_v(item)
query = user
key = item
value = torch.mm(query, key.transpose(0, 1))
value = UV_adj.to_dense() * value
value /= math.sqrt(self.opt['hidden_dim'])
value = F.softmax(value, dim=1)
learn_user = torch.matmul(value, key) + user
query = item
key = user
value = torch.mm(query, key.transpose(0, 1))
value = VU_adj.to_dense() * value
value /= math.sqrt(self.opt['hidden_dim'])
value = F.softmax(value, dim=1)
learn_item = torch.matmul(value, key) + item
return learn_user, learn_item
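# The forward above runs two attention passes: users attend over items, then
# items attend over users. UV_adj / VU_adj are (densified) bipartite adjacency
# matrices applied as elementwise weights on the raw dot-product scores before
# the sqrt(hidden_dim) scaling and softmax, and each pass ends with a residual
# connection (+ user, + item).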
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(hidden_dim=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp3
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp3
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp8 - tmp19
tmp25 = tmp24 * tmp21
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp23 + tmp26
tmp28 = tmp13 - tmp19
tmp29 = tmp28 * tmp21
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp27 + tmp30
tmp32 = tmp18 - tmp19
tmp33 = tmp32 * tmp21
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp31 + tmp34
tl.store(out_ptr0 + x0, tmp19, xmask)
tl.store(out_ptr1 + x0, tmp35, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mul_0[grid(4)](primals_7, buf2, buf3,
buf4, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf5 = buf2
del buf2
triton_poi_fused__softmax_mul_1[grid(16)](buf5, primals_7, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf0, buf5, buf1, alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
out=buf7)
buf8 = buf4
del buf4
buf9 = buf3
del buf3
triton_poi_fused__softmax_mul_0[grid(4)](primals_8, buf7, buf8,
buf9, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf10 = buf7
del buf7
triton_poi_fused__softmax_mul_1[grid(16)](buf10, primals_8, buf8,
buf9, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf8
del buf9
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, buf10, buf0, alpha=1, beta=1, out=buf11)
return (buf6, buf11, primals_3, primals_6, primals_7, primals_8, buf0,
buf1, buf5, buf10)
class AttentionNew(nn.Module):
def __init__(self, opt):
super(AttentionNew, self).__init__()
self.lin_u = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.lin_v = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.opt = opt
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.lin_u.weight
primals_2 = self.lin_u.bias
primals_3 = self.lin_v.weight
primals_5 = self.lin_v.bias
primals_4 = input_0
primals_6 = input_1
primals_7 = input_2
primals_8 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| caojiangxia/BiGI | Attention | false | 15,003 | ["MIT"] | 57 | ed54c20523a5b3f295b90a9c08f7c54e8258d04a | https://github.com/caojiangxia/BiGI/tree/ed54c20523a5b3f295b90a9c08f7c54e8258d04a |
SplitChannels | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/u5/cu56dhpcth43gy4shrd7mcexf4nfa6qetnnhwe4mno4v6ug76h6j.py
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# a_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 0, 4, 4), (0, 16, 4, 1), torch.float32)
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class SplitChannels(torch.nn.Module):
def __init__(self, split_location):
super(SplitChannels, self).__init__()
self.split_location = split_location
def forward(self, x):
a, b = x[:, :self.split_location], x[:, self.split_location:]
a, b = a.clone(), b.clone()
del x
return a, b
def inverse(self, x, y):
return torch.cat([x, y], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'split_location': 4}]
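# Round-trip sketch (illustrative, not from the source repo): inverse()
# concatenates the halves along dim 1, so split followed by inverse is the
# identity.
# m = SplitChannels(2)
# x = torch.rand(4, 4, 4, 4)
# a, b = m(x)  # a: (4, 2, 4, 4), b: (4, 2, 4, 4)
# assert torch.equal(m.inverse(a, b), x)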
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 0, 4, 4), (0, 16, 4, 1), torch.float32)
return buf0, buf1
class SplitChannelsNew(torch.nn.Module):
def __init__(self, split_location):
super(SplitChannelsNew, self).__init__()
self.split_location = split_location
def inverse(self, x, y):
return torch.cat([x, y], dim=1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
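# Note: this compiled path is specialized to split_location == 4 on a
# (4, 4, 4, 4) input, so buf1 is an empty (4, 0, 4, 4) tensor, matching
# x[:, 4:] in the eager module. Hypothetical shape check (assumes CUDA):
# a, b = SplitChannelsNew(4)(torch.rand(4, 4, 4, 4, device='cuda'))
# assert a.shape == (4, 4, 4, 4) and b.shape == (4, 0, 4, 4)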
| cetmann/iunets | SplitChannels | false | 15,004 | [
"MIT"
]
| 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
SoftTargetCrossEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7e/c7eos52pj4trwrwevfplxacwgfirtfuiycj3hrmzuhm4mq7vguud.py
# Topologically Sorted Source Nodes: [neg, log_softmax, mul, loss, mean], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# loss => sum_2
# mean => mean
# mul => mul
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
triton_per_fused__log_softmax_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp1 * tmp14
tmp17 = -tmp16
tmp18 = tmp4 - tmp13
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = -tmp21
tmp23 = tmp7 - tmp13
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp27 = -tmp26
tmp28 = tmp10 - tmp13
tmp29 = tmp27 * tmp28
tmp30 = tmp25 + tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = 64.0
tmp35 = tmp33 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [neg, log_softmax, mul, loss, mean], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.mean]
triton_per_fused__log_softmax_mean_mul_neg_sum_1.run(buf3, arg0_1, buf0, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed
class SoftTargetCrossEntropy(nn.Module):
def forward(self, x, target):
loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
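# Sanity-check sketch (illustrative): with one-hot targets the soft-target
# loss reduces to standard cross entropy over the last dimension.
# crit = SoftTargetCrossEntropy()
# x = torch.randn(8, 10)
# y = F.one_hot(torch.randint(0, 10, (8,)), 10).float()
# assert torch.allclose(crit(x, y), F.cross_entropy(x, y.argmax(-1)), atol=1e-6)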
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
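# Stability note (illustrative): subtracting the row max before exp() leaves
# log-softmax unchanged, since log_softmax(x) = (x - m) - log(sum(exp(x - m)))
# for any shift m. E.g. the row [1000., 1001.] would overflow exp() in fp32,
# while after subtracting m = 1001 the shifted row [-1., 0.] gives
# log_softmax = [-1.3133, -0.3133].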
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
# xnumel == 1 and rnumel == 64 for this reduction, so the x-index and the
# x/r masks are trivially all-true and are not materialized.
rindex = tl.arange(0, RBLOCK)[None, :]
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp1 * tmp14
tmp17 = -tmp16
tmp18 = tmp4 - tmp13
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = -tmp21
tmp23 = tmp7 - tmp13
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp27 = -tmp26
tmp28 = tmp10 - tmp13
tmp29 = tmp27 * tmp28
tmp30 = tmp25 + tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = 64.0
tmp35 = tmp33 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3,
arg0_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
class SoftTargetCrossEntropyNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
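# Hypothetical parity check (assumes CUDA): in call(), arg0_1 carries the
# target and arg1_1 the logits, so with m = SoftTargetCrossEntropyNew():
# t, x = torch.rand(4, 4, 4, 4, device='cuda'), torch.rand(4, 4, 4, 4, device='cuda')
# eager = torch.sum(-t * torch.nn.functional.log_softmax(x, dim=-1), dim=-1).mean()
# assert torch.allclose(m(t, x), eager, atol=1e-5)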
| ccjlovewsy/relabel_imagenet | SoftTargetCrossEntropy | false | 15,005 | [
"Apache-2.0"
]
| 344 | 6cd84dffe4ce8005395970b2938b3196d0958351 | https://github.com/ccjlovewsy/relabel_imagenet/tree/6cd84dffe4ce8005395970b2938b3196d0958351 |
SmoothCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [lsm], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# lsm => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xz/cxzrhhdsigxzd7l33hed2jl7lxvtwhqe4lxz7hmqo775cqjakfzl.py
# Topologically Sorted Source Nodes: [targets, lsm, mul, sum_1, loss, loss_1], Original ATen: [aten.scatter, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean]
# Source node to ATen node mapping:
# loss => neg
# loss_1 => mean
# lsm => exp, log, sub_1, sum_1
# mul => mul
# sum_1 => sum_2
# targets => scatter_upon_const_tensor
# Graph fragment:
# %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: 0.0, dtype: torch.float32, dim: 1, selector: %unsqueeze, val: 1.0})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%scatter_upon_const_tensor, %sub_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {})
triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (4*r2), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (2 + (4*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*r2)), None, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp6 - tmp17
tmp19 = tmp5 * tmp18
tmp20 = tl.full([1, 1], 1, tl.int64)
tmp21 = tmp0 == tmp20
tmp22 = tl.where(tmp21, tmp3, tmp4)
tmp23 = tmp8 - tmp17
tmp24 = tmp22 * tmp23
tmp25 = tmp19 + tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp0 == tmp26
tmp28 = tl.where(tmp27, tmp3, tmp4)
tmp29 = tmp11 - tmp17
tmp30 = tmp28 * tmp29
tmp31 = tmp25 + tmp30
tmp32 = tl.full([1, 1], 3, tl.int64)
tmp33 = tmp0 == tmp32
tmp34 = tl.where(tmp33, tmp3, tmp4)
tmp35 = tmp14 - tmp17
tmp36 = tmp34 * tmp35
tmp37 = tmp31 + tmp36
tmp38 = -tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [lsm], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [targets, lsm, mul, sum_1, loss, loss_1], Original ATen: [aten.scatter, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean]
triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1.run(buf3, arg1_1, buf0, 1, 64, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
class SmoothCrossEntropyLoss(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth_one_hot(targets: 'torch.Tensor', n_classes: 'int',
smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = torch.empty(size=(targets.size(0), n_classes), device
=targets.device).fill_(smoothing / (n_classes - 1)).scatter_(
1, targets.data.unsqueeze(1), 1.0 - smoothing)
return targets
def forward(self, inputs, targets):
targets = SmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.
size(-1), self.smoothing)
lsm = F.log_softmax(inputs, -1)
if self.weight is not None:
lsm = lsm * self.weight.unsqueeze(0)
loss = -(targets * lsm).sum(-1)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
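# Sanity-check sketch (illustrative): with smoothing=0.0 the smoothed one-hot
# collapses to a plain one-hot, so the loss matches F.cross_entropy.
# crit = SmoothCrossEntropyLoss(smoothing=0.0)
# x, y = torch.randn(8, 10), torch.randint(0, 10, (8,))
# assert torch.allclose(crit(x, y), F.cross_entropy(x, y), atol=1e-6)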
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _WeightedLoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
# xnumel == 1 and rnumel == 64 for this reduction, so the x-index and the
# x/r masks are trivially all-true and are not materialized.
rindex = tl.arange(0, RBLOCK)[None, :]
r0 = rindex % 4
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 4 * r2, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * r2), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (2 + 4 * r2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + 4 * r2), None, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp6 - tmp17
tmp19 = tmp5 * tmp18
tmp20 = tl.full([1, 1], 1, tl.int64)
tmp21 = tmp0 == tmp20
tmp22 = tl.where(tmp21, tmp3, tmp4)
tmp23 = tmp8 - tmp17
tmp24 = tmp22 * tmp23
tmp25 = tmp19 + tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp0 == tmp26
tmp28 = tl.where(tmp27, tmp3, tmp4)
tmp29 = tmp11 - tmp17
tmp30 = tmp28 * tmp29
tmp31 = tmp25 + tmp30
tmp32 = tl.full([1, 1], 3, tl.int64)
tmp33 = tmp0 == tmp32
tmp34 = tl.where(tmp33, tmp3, tmp4)
tmp35 = tmp14 - tmp17
tmp36 = tmp34 * tmp35
tmp37 = tmp31 + tmp36
tmp38 = -tmp37
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1[grid(1)](buf3,
arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
class SmoothCrossEntropyLossNew(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth_one_hot(targets: 'torch.Tensor', n_classes: 'int',
smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = torch.empty(size=(targets.size(0), n_classes), device
=targets.device).fill_(smoothing / (n_classes - 1)).scatter_(
1, targets.data.unsqueeze(1), 1.0 - smoothing)
return targets
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| cclauss/archai | SmoothCrossEntropyLoss | false | 15,006 | [
"MIT"
]
| 344 | a5fb8f937f7f1319e3204120803b2a045e9f768b | https://github.com/cclauss/archai/tree/a5fb8f937f7f1319e3204120803b2a045e9f768b |
GAT | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4b/c4bcug5vkha2hj57viumhdilkslvdcnorocrxpnqzqpt6x767j3b.py
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# value_1 => mul
# value_3 => exp, sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %mm), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_mul_0 = async_compile.triton('triton_poi_fused__softmax_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp3
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp3
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp8 - tmp19
tmp25 = tmp24 * tmp21
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp23 + tmp26
tmp28 = tmp13 - tmp19
tmp29 = tmp28 * tmp21
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp27 + tmp30
tmp32 = tmp18 - tmp19
tmp33 = tmp32 * tmp21
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp31 + tmp34
tl.store(out_ptr0 + (x0), tmp19, xmask)
tl.store(out_ptr1 + (x0), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xa/cxa3a5qoeofgxax64qpycbkpkrqitogqnqea3gnacj5tx7rblerz.py
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# value_1 => mul
# value_3 => div_1, exp
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %mm), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_mul_1 = async_compile.triton('triton_poi_fused__softmax_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [user], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, primals_1, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [item], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, primals_2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_mul_0.run(primals_7, buf2, buf3, buf4, 4, grid=grid(4), stream=stream0)
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [value_1, value_3], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_1.run(buf5, primals_7, buf3, buf4, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(buf0, buf5, buf1, alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [value_4], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf7)
buf8 = buf4; del buf4 # reuse
buf9 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [value_5, value_7], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_0.run(primals_8, buf7, buf8, buf9, 4, grid=grid(4), stream=stream0)
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [value_5, value_7], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_1.run(buf10, primals_8, buf8, buf9, 16, grid=grid(16), stream=stream0)
del buf8
del buf9
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(buf1, buf10, buf0, alpha=1, beta=1, out=buf11)
return (buf6, buf11, primals_1, primals_2, primals_7, primals_8, buf0, buf1, buf5, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.lin_u = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.lin_v = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.opt = opt
def forward(self, user, item, UV_adj, VU_adj):
user = self.lin_u(user)
item = self.lin_v(item)
query = user
key = item
value = torch.mm(query, key.transpose(0, 1))
value = UV_adj.to_dense() * value
value /= math.sqrt(self.opt['hidden_dim'])
value = F.softmax(value, dim=1)
learn_user = torch.matmul(value, key) + user
query = item
key = user
value = torch.mm(query, key.transpose(0, 1))
value = VU_adj.to_dense() * value
value /= math.sqrt(self.opt['hidden_dim'])
value = F.softmax(value, dim=1)
learn_item = torch.matmul(value, key) + item
return learn_user, learn_item
class GAT(nn.Module):
def __init__(self, opt):
super(GAT, self).__init__()
self.att = Attention(opt)
self.dropout = opt['dropout']
self.leakyrelu = nn.LeakyReLU(opt['leakey'])
def forward(self, ufea, vfea, UV_adj, VU_adj, adj=None):
learn_user = ufea
learn_item = vfea
learn_user = F.dropout(learn_user, self.dropout, training=self.training
)
learn_item = F.dropout(learn_item, self.dropout, training=self.training
)
learn_user, learn_item = self.att(learn_user, learn_item, UV_adj,
VU_adj)
return learn_user, learn_item
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(hidden_dim=4, dropout=0.5, leakey=4)}]
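# Worked sketch of one attention step (illustrative): scores are masked by
# the adjacency and scaled by 1/sqrt(hidden_dim) = 0.5 for hidden_dim = 4
# before the row-wise softmax, then added back as a residual.
# scores = user @ item.t()                   # (n_user, n_item)
# scores = UV_adj.to_dense() * scores / 2.0  # mask, then scale
# attn = F.softmax(scores, dim=1)
# learn_user = attn @ item + user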
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp3
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp3
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp8 - tmp19
tmp25 = tmp24 * tmp21
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp23 + tmp26
tmp28 = tmp13 - tmp19
tmp29 = tmp28 * tmp21
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp27 + tmp30
tmp32 = tmp18 - tmp19
tmp33 = tmp32 * tmp21
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp31 + tmp34
tl.store(out_ptr0 + x0, tmp19, xmask)
tl.store(out_ptr1 + x0, tmp35, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
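# Note: the eager `value /= math.sqrt(self.opt['hidden_dim'])` (sqrt(4) = 2
# for the traced config) appears in both kernels above as a multiply by 0.5,
# applied after the row-max subtraction so the softmax stays numerically
# stable.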
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, primals_1, reinterpret_tensor(
primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, primals_2, reinterpret_tensor(
primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mul_0[grid(4)](primals_7, buf2, buf3,
buf4, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf5 = buf2
del buf2
triton_poi_fused__softmax_mul_1[grid(16)](buf5, primals_7, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf0, buf5, buf1, alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
out=buf7)
buf8 = buf4
del buf4
buf9 = buf3
del buf3
triton_poi_fused__softmax_mul_0[grid(4)](primals_8, buf7, buf8,
buf9, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf10 = buf7
del buf7
triton_poi_fused__softmax_mul_1[grid(16)](buf10, primals_8, buf8,
buf9, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf8
del buf9
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, buf10, buf0, alpha=1, beta=1, out=buf11)
return (buf6, buf11, primals_1, primals_2, primals_7, primals_8, buf0,
buf1, buf5, buf10)
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.lin_u = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.lin_v = nn.Linear(opt['hidden_dim'], opt['hidden_dim'])
self.opt = opt
def forward(self, user, item, UV_adj, VU_adj):
user = self.lin_u(user)
item = self.lin_v(item)
query = user
key = item
value = torch.mm(query, key.transpose(0, 1))
value = UV_adj.to_dense() * value
value /= math.sqrt(self.opt['hidden_dim'])
value = F.softmax(value, dim=1)
learn_user = torch.matmul(value, key) + user
query = item
key = user
value = torch.mm(query, key.transpose(0, 1))
value = VU_adj.to_dense() * value
value /= math.sqrt(self.opt['hidden_dim'])
value = F.softmax(value, dim=1)
learn_item = torch.matmul(value, key) + item
return learn_user, learn_item
class GATNew(nn.Module):
def __init__(self, opt):
super(GATNew, self).__init__()
self.att = Attention(opt)
self.dropout = opt['dropout']
self.leakyrelu = nn.LeakyReLU(opt['leakey'])
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.att.lin_u.weight
primals_4 = self.att.lin_u.bias
primals_2 = self.att.lin_v.weight
primals_6 = self.att.lin_v.bias
primals_3 = input_0
primals_5 = input_1
primals_7 = input_2
primals_8 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
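# Hypothetical smoke test (assumes CUDA; the opt dict mirrors
# get_init_inputs in the eager module):
# gat = GATNew({'hidden_dim': 4, 'dropout': 0.5, 'leakey': 4}).cuda()
# u, v, uv, vu = (torch.rand(4, 4, device='cuda') for _ in range(4))
# lu, li = gat(u, v, uv, vu)  # each of shape (4, 4)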
| caojiangxia/BiGI | GAT | false | 15,007 | [
"MIT"
]
| 57 | ed54c20523a5b3f295b90a9c08f7c54e8258d04a | https://github.com/caojiangxia/BiGI/tree/ed54c20523a5b3f295b90a9c08f7c54e8258d04a |
LinfDistance | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4q/c4q4e4n3zxfyaf7sbupb7psgwdvqjruntbudkoirawbm6ifxemtx.py
# Topologically Sorted Source Nodes: [abs_1, max_1], Original ATen: [aten.abs, aten.max]
# Source node to ATen node mapping:
# abs_1 => abs_1
# max_1 => max_1
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%view,), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%abs_1, 1), kwargs = {})
triton_per_fused_abs_max_0 = async_compile.triton('triton_per_fused_abs_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_max_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, float("-inf"))
tmp7 = triton_helpers.max2(tmp6, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [abs_1, max_1], Original ATen: [aten.abs, aten.max]
stream0 = get_raw_stream(0)
triton_per_fused_abs_max_0.run(arg0_1, arg1_1, buf0, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.autograd
class LinfDistance(nn.Module):
def forward(self, img1, img2):
return (img1 - img2).reshape(img1.shape[0], -1).abs().max(dim=1)[0]
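# Per-sample L-infinity distance: flatten everything but the batch dim and
# take the max absolute difference, matching the fused abs+max kernel above.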
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_max_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, float('-inf'))
tmp7 = triton_helpers.max2(tmp6, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_max_0[grid(4)](arg0_1, arg1_1, buf0, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class LinfDistanceNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
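# Hedged equivalence check (a sketch; CUDA is required by the kernel above):
# a = torch.rand(4, 4, 4, 4, device='cuda')
# b = torch.rand(4, 4, 4, 4, device='cuda')
# assert torch.allclose(LinfDistanceNew()(a, b),
#     (a - b).reshape(4, -1).abs().max(dim=1)[0])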
| cassidylaidlaw/perceptual-advex | LinfDistance | false | 15,008 | [
"MIT"
]
| 45 | d39136eb5b5e950442456ddade6b4f4fba3dd8f6 | https://github.com/cassidylaidlaw/perceptual-advex/tree/d39136eb5b5e950442456ddade6b4f4fba3dd8f6 |
ImageNetNormalizer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5t/c5twkwfbv7hgkadmaq6h5wresi3d4l6blhkqy5qhc2klpal24nly.py
# Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div]
# Source node to ATen node mapping:
# sub => sub
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %unsqueeze_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %unsqueeze_5), kwargs = {})
triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 3
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = x1
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp1 < tmp2
tmp4 = tl.full([1], 2, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = 0.4560000002384186
tmp7 = 0.4059999883174896
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = 0.48500001430511475
tmp10 = tl.where(tmp3, tmp9, tmp8)
tmp11 = tmp0 - tmp10
tmp12 = 0.2240000069141388
tmp13 = 0.22499999403953552
tmp14 = tl.where(tmp5, tmp12, tmp13)
tmp15 = 0.2290000021457672
tmp16 = tl.where(tmp3, tmp15, tmp14)
tmp17 = tmp11 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sub_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.autograd
class ImageNetNormalizer(nn.Module):
def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
super().__init__()
self.mean = mean
self.std = std
def forward(self, x):
mean = torch.tensor(self.mean, device=x.device)
std = torch.tensor(self.std, device=x.device)
return (x - mean[None, :, None, None]) / std[None, :, None, None]
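# mean[None, :, None, None] broadcasts to shape (1, C, 1, 1), so the same
# per-channel statistics are applied across the batch and spatial dimensions.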
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 3
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = x1
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp1 < tmp2
tmp4 = tl.full([1], 2, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = 0.4560000002384186
tmp7 = 0.4059999883174896
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = 0.48500001430511475
tmp10 = tl.where(tmp3, tmp9, tmp8)
tmp11 = tmp0 - tmp10
tmp12 = 0.2240000069141388
tmp13 = 0.22499999403953552
tmp14 = tl.where(tmp5, tmp12, tmp13)
tmp15 = 0.2290000021457672
tmp16 = tl.where(tmp3, tmp15, tmp14)
tmp17 = tmp11 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sub_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ImageNetNormalizerNew(nn.Module):
def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
super().__init__()
self.mean = mean
self.std = std
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
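# Hedged usage sketch (CUDA required; the compiled kernel hard-codes the
# default ImageNet statistics and a 3-channel input of shape (4, 3, 4, 4)):
# norm = ImageNetNormalizerNew()
# y = norm(torch.rand(4, 3, 4, 4, device='cuda'))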
| cassidylaidlaw/perceptual-advex | ImageNetNormalizer | false | 15,009 | [
"MIT"
]
| 45 | d39136eb5b5e950442456ddade6b4f4fba3dd8f6 | https://github.com/cassidylaidlaw/perceptual-advex/tree/d39136eb5b5e950442456ddade6b4f4fba3dd8f6 |
L2Distance | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rc/crca2ncueyktbqvp3ptu7ygu4tcxrdf37qsbzfeltvhe624nd4dv.py
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_per_fused_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0.run(buf1, arg0_1, arg1_1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.autograd
class L2Distance(nn.Module):
def forward(self, img1, img2):
return (img1 - img2).reshape(img1.shape[0], -1).norm(dim=1)
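# Per-sample Euclidean (L2) distance over the flattened feature dims; the
# Triton kernel above fuses the subtract, square, sum and sqrt into one pass.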
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(4)](buf1, arg0_1, arg1_1,
4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L2DistanceNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
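# Hedged equivalence check (a sketch; CUDA is required by the kernel above):
# a = torch.rand(4, 4, 4, 4, device='cuda')
# b = torch.rand(4, 4, 4, 4, device='cuda')
# assert torch.allclose(L2DistanceNew()(a, b),
#     (a - b).reshape(4, -1).norm(dim=1))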
| cassidylaidlaw/perceptual-advex | L2Distance | false | 15,010 | [
"MIT"
]
| 45 | d39136eb5b5e950442456ddade6b4f4fba3dd8f6 | https://github.com/cassidylaidlaw/perceptual-advex/tree/d39136eb5b5e950442456ddade6b4f4fba3dd8f6 |
GaussianConv2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/k4/ck4qv3kxa3iieaknofb7rptujn3fj2obyj755mxtsm3f3rqxtqie.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 4), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (16384*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cp/ccpzlma5no2kj4ugzbkym6souevoppcifwy3qnpnpuxnua3tyj3p.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 4), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 3600
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (14400*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (3600*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(arg1_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(arg1_1, buf0, 16, 4096, grid=grid(16, 4096), stream=stream0)
del arg1_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 60, 60), (14400, 1, 240, 4))
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 4, 60, 60), (14400, 3600, 60, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf1, buf2, 16, 3600, grid=grid(16, 3600), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
from torch.nn.parameter import Parameter
class GaussianConv2d(nn.Module):
def __init__(self, in_channels, out_channels, ksize=5):
"""Applies 2-D Gaussian Blur.
Args:
in_channels: An integer indicating the input channel dimension.
out_channels: An integer indicating the output channel dimension.
ksize: An integer indicating the Gaussian kernel size.
"""
super(GaussianConv2d, self).__init__()
weight = (np.arange(ksize, dtype=np.float32) - ksize // 2) ** 2
weight = np.sqrt(weight[None, :] + weight[:, None])
weight = np.reshape(weight, (1, 1, ksize, ksize)) / weight.sum()
self.weight = Parameter(torch.Tensor(weight).expand(out_channels, -
1, -1, -1))
self._in_channels = in_channels
self._out_channels = out_channels
def forward(self, x):
with torch.no_grad():
return F.conv2d(x, self.weight, groups=self._in_channels)
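# With groups=in_channels this is a depthwise convolution: each input channel
# is filtered independently. No padding is used, so a ksize=5 kernel shrinks a
# 64x64 input to 60x60, matching the (4, 4, 60, 60) guard in call() above.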
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
import torch.utils.data
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 3600
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 14400 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 3600 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(arg1_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch
.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4096)](arg1_1, buf0, 16,
4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 60, 60), (14400, 1, 240, 4))
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 4, 60, 60), (14400, 3600, 60, 1),
torch.float32)
triton_poi_fused_convolution_1[grid(16, 3600)](buf1, buf2, 16, 3600,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf1
return buf2,
class GaussianConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, ksize=5):
"""Applies 2-D Gaussian Blur.
Args:
in_channels: An integer indicating the input channel dimension.
out_channels: An integer indicating the output channel dimension.
ksize: An integer indicating the Gaussian kernel size.
"""
super(GaussianConv2dNew, self).__init__()
weight = (np.arange(ksize, dtype=np.float32) - ksize // 2) ** 2
weight = np.sqrt(weight[None, :] + weight[:, None])
weight = np.reshape(weight, (1, 1, ksize, ksize)) / weight.sum()
self.weight = Parameter(torch.Tensor(weight).expand(out_channels, -
1, -1, -1))
self._in_channels = in_channels
self._out_channels = out_channels
def forward(self, input_0):
arg0_1 = self.weight
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
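# Hedged usage sketch (CUDA required; shapes follow the guards in call()):
# blur = GaussianConv2dNew(in_channels=4, out_channels=4).cuda()
# out = blur(torch.rand(4, 4, 64, 64, device='cuda'))  # -> (4, 4, 60, 60)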
| cenkbircanoglu/SPML | GaussianConv2d | false | 15,011 | [
"MIT"
]
| 68 | f09e4c30ecf2030d42ac70b2c35e7fdeee9bf468 | https://github.com/cenkbircanoglu/SPML/tree/f09e4c30ecf2030d42ac70b2c35e7fdeee9bf468 |
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nh/cnh6iem5ybdb3twbxsrnswdgv7527qrbvucjhftq2p4nhirlo2lo.py
# Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# distances => sum_1
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yu/cyuyrkbzs3boq5rfip75kejn76bd4kpeieqcygt7f7quogzu4q5u.py
# Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# losses => mul_3
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_2 => pow_2
# relu => relu
# sqrt => sqrt
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %sum_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, -1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-09), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %sqrt), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_3,), kwargs = {})
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1 = async_compile.triton('triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp0 * tmp3
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = 1e-09
tmp8 = tmp1 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 4.0
tmp11 = tmp10 - tmp9
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tmp13 * tmp13
tmp15 = tmp6 * tmp14
tmp16 = tmp2 + tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean]
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import *
from torch.distributions import *
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise.
"""
def __init__(self, margin):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, output1, output2, target, size_average=True):
distances = (output2 - output1).pow(2).sum(1)
losses = 0.5 * (target.float() * distances + (1 + -1 * target).
float() * F.relu(self.margin - (distances + self.eps).sqrt()).
pow(2))
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.utils.data import *
from torch.distributions import *
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp0 * tmp3
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = 1e-09
tmp8 = tmp1 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 4.0
tmp11 = tmp10 - tmp9
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tmp13 * tmp13
tmp15 = tmp6 * tmp14
tmp16 = tmp2 + tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
Contrastive loss
Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise.
"""
def __init__(self, margin):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
self.eps = 1e-09
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
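# Hedged usage sketch (CUDA required; margin=4 mirrors get_init_inputs above):
# crit = ContrastiveLossNew(margin=4)
# e1 = torch.rand(4, 4, 4, 4, device='cuda')
# e2 = torch.rand(4, 4, 4, 4, device='cuda')
# y = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
# loss = crit(e1, e2, y)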
| cgsas/LOB | ContrastiveLoss | false | 15,012 | [
"MIT"
]
| 97 | 4175912194c2a066b2d7df038a376484b57ed76c | https://github.com/cgsas/LOB/tree/4175912194c2a066b2d7df038a376484b57ed76c |
CombinedTargetMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3x/c3x4dfdpczepakwjr65svilah4l3hkarfrl226xahz32qv5ozznu.py
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
# Source node to ATen node mapping:
# heatmap_gt_1 => mul_1
# heatmap_pred_1 => mul
# loss => add
# loss_1 => add_1
# loss_2 => add_2
# mse_loss => mean, pow_1, sub
# mse_loss_1 => mean_1, pow_2, sub_1
# mse_loss_2 => mean_2, pow_3, sub_2
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %select), kwargs = {})
# %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %select_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_8), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1), kwargs = {})
triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp41, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0.run(buf3, arg0_1, arg2_1, arg1_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
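# Illustrative eager-mode sketch (not part of the generated code) of what the
# fused kernel above computes. Tensor roles follow the `call` wrapper:
# arg0_1 = prediction, arg1_1 = ground truth, arg2_1 = target weight. With 4
# channels, num_joints = 4 // 3 = 1, so only channel 0 (heatmap) and channels
# 1/2 (x/y offsets) contribute; channel 3 is never read.
def _reference_combined_target_loss(pred, gt, weight):
    w = weight[:, 0]
    heat_pred = pred[:, 0] * w
    heat_gt = gt[:, 0] * w
    loss = 0.5 * ((heat_pred - heat_gt) ** 2).mean()
    loss = loss + 0.5 * ((heat_gt * pred[:, 1] - heat_gt * gt[:, 1]) ** 2).mean()
    loss = loss + 0.5 * ((heat_gt * pred[:, 2] - heat_gt * gt[:, 2]) ** 2).mean()
    return loss / 1.0  # num_joints == 1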
| import torch
import torch.nn as nn
class CombinedTargetMSELoss(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
"""
def __init__(self, use_target_weight):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_channels = output.size(1)
        heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split(1, 1)
        heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1)
loss = 0.0
num_joints = num_channels // 3
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx * 3].squeeze()
heatmap_gt = heatmaps_gt[idx * 3].squeeze()
offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()
offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()
offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()
offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()
if self.use_target_weight:
heatmap_pred = heatmap_pred.mul(target_weight[:, idx])
heatmap_gt = heatmap_gt.mul(target_weight[:, idx])
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,
heatmap_gt * offset_x_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,
heatmap_gt * offset_y_gt)
return loss / num_joints
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'use_target_weight': 4}]
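# Usage sketch (illustrative): exercising the eager module with the shapes
# from get_inputs(); runs on CPU. use_target_weight just needs to be truthy,
# matching the spirit of get_init_inputs().
def _example_forward():
    criterion = CombinedTargetMSELoss(use_target_weight=True)
    output, target, target_weight = get_inputs()
    return criterion(output, target, target_weight)  # scalar loss tensor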
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
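    # Fused combined-target MSE for one joint. Pointer roles (from the call
    # site below): in_ptr0 = prediction, in_ptr1 = target weight (only
    # column 0 is read), in_ptr2 = ground truth; columns 0/1/2 of each row
    # hold the heatmap, offset-x and offset-y channels.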
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf3, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class CombinedTargetMSELossNew(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
"""
def __init__(self, use_target_weight):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
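# Sanity-check sketch (illustrative, requires CUDA since the kernel above is
# compiled for 'cuda'): the fused module should match an inline eager
# computation of the same combined-target loss.
def _check_against_eager(atol=1e-06):
    if not torch.cuda.is_available():
        return None
    pred, gt, w = (torch.rand(4, 4, device='cuda') for _ in range(3))
    fused = CombinedTargetMSELossNew(use_target_weight=True)(pred, gt, w)
    mse = nn.MSELoss()
    hp, hg = pred[:, 0] * w[:, 0], gt[:, 0] * w[:, 0]
    eager = 0.5 * (mse(hp, hg) + mse(hg * pred[:, 1], hg * gt[:, 1]) + mse(
        hg * pred[:, 2], hg * gt[:, 2]))
    return torch.allclose(fused, eager, atol=atol)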
| chaowentao/mmpose | CombinedTargetMSELoss | false | 15,013 | [
"Apache-2.0"
]
| 367 | b528c60ef4fab56d35d1ed7e187023794639be26 | https://github.com/chaowentao/mmpose/tree/b528c60ef4fab56d35d1ed7e187023794639be26 |
MoEHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [contiguous_4], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_4 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vq/cvqpprnukykv7fb6t2uveui44qrapemorby5j3fnnfeymwpqwe63.py
# Topologically Sorted Source Nodes: [mix], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# mix => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wo/cwo7i4dcowaxozwnj57wqq4ba45uo7pev3igxxitkuqs52wnxctl.py
# Topologically Sorted Source Nodes: [mix_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# mix_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_20, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_20, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vw/cvwwlf5hh74femiy52pxilo5w77x22ndrh7cd3nkkzhhazqhimhy.py
# Topologically Sorted Source Nodes: [mix_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# mix_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zc/czc5gxgexoeqyqqwaoklwx3qouhcdrlfrm4bkph4g7kzrnbtjag5.py
# Topologically Sorted Source Nodes: [mix_1, mul, probs], Original ATen: [aten._softmax, aten.mul, aten.sum]
# Source node to ATen node mapping:
# mix_1 => div, sum_1
# mul => mul
# probs => sum_2
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_9, %div), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-2]), kwargs = {})
triton_poi_fused__softmax_mul_sum_4 = async_compile.triton('triton_poi_fused__softmax_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x5), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (1, 1), (1, 1))
assert_size_stride(primals_11, (1, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_4], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [contiguous_6], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_6, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [contiguous_8], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_9, buf6, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_9
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mix], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf10 = reinterpret_tensor(buf7, (64, 1), (1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (64, 1), (1, 0), 0), primals_10, alpha=1, beta=1, out=buf10)
del primals_11
buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [mix_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [mix_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [mix_1, mul, probs], Original ATen: [aten._softmax, aten.mul, aten.sum]
triton_poi_fused__softmax_mul_sum_4.run(buf5, buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [mix_1, mul, probs, outputs], Original ATen: [aten._softmax, aten.mul, aten.sum, aten.bmm]
extern_kernels.bmm(buf13, primals_7, out=buf14)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_13
return (reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0), primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf8, (64, 1), (1, 1), 0), buf10, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), primals_12, primals_10, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
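# Illustrative sketch of the softmax pair above: Inductor splits a
# numerically stable softmax over dim=1 of the (4, 4, 4, 1) gate logits into
# two element-wise passes, mirrored by triton_poi_fused__softmax_2 (subtract
# the running max, exponentiate) and triton_poi_fused__softmax_3 (normalize).
def _softmax_dim1_reference(x):
    e = (x - x.amax(dim=1, keepdim=True)).exp()
    return e / e.sum(dim=1, keepdim=True)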
| import math
import torch
from torch.nn import functional as F
from torch.autograd import Variable
from torch import nn
def softmax(x):
    # dim arguments made explicit to match the legacy implicit-dim choice
    # for the 2D/3D/4D inputs used in this module
    if x.dim() == 3:
        return F.softmax(x.transpose(0, 2), dim=0).transpose(0, 2)
    return F.softmax(x, dim=1)
# TINY and INF were undefined in the extracted snippet; the values below are
# the conventional small/large constants from the source repo (assumed).
TINY = 1e-09
INF = 1e+10
def gumbel_softmax(input, beta=0.5, tau=1.0):
    noise = input.data.new(*input.size()).uniform_()
    noise.add_(TINY).log_().neg_().add_(TINY).log_().neg_()
    return softmax((input + beta * Variable(noise)) / tau)
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-1)).squeeze(-1)
def mask(targets, out, input_mask=None, return_mask=False):
    if input_mask is None:
        input_mask = targets != 1
    out_mask = input_mask.unsqueeze(-1).expand_as(out)
    if return_mask:
        # the original referenced an undefined `the_mask`; returning the
        # computed input_mask is the assumed intent
        return targets[input_mask], out[out_mask].view(-1, out.size(-1)), input_mask
    return targets[input_mask], out[out_mask].view(-1, out.size(-1))
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal, diag=False, window=-1,
noisy=False):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
self.diag = diag
self.window = window
self.noisy = noisy
def forward(self, query, key, value=None, mask=None, feedback=None,
beta=0, tau=1, weights=None):
dot_products = matmul(query, key.transpose(1, 2))
if weights is not None:
dot_products = dot_products + weights
if query.dim() == 3 and self.causal and query.size(1) == key.size(1):
tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
dot_products.data.sub_(tri.unsqueeze(0))
if self.window > 0:
window_mask = key.data.new(key.size(1), key.size(1)).fill_(1)
window_mask = (window_mask.triu(self.window + 1) + window_mask.
tril(-self.window - 1)) * INF
dot_products.data.sub_(window_mask.unsqueeze(0))
if self.diag:
inds = torch.arange(0, key.size(1)).long().view(1, 1, -1)
            if key.is_cuda:
                inds = inds.cuda()  # restore the device move; the extracted line was a no-op
dot_products.data.scatter_(1, inds.expand(dot_products.size(0),
1, inds.size(-1)), -INF)
if mask is not None:
if dot_products.dim() == 2:
dot_products.data -= (1 - mask) * INF
else:
dot_products.data -= (1 - mask[:, None, :]) * INF
if value is None:
return dot_products
logits = dot_products / self.scale
if not self.noisy:
probs = softmax(logits)
else:
probs = gumbel_softmax(logits, beta=beta, tau=tau)
if feedback is not None:
feedback.append(probs.contiguous())
return matmul(self.dropout(probs), value)
class MoEHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False,
diag=False, window=-1, noisy=False, use_wo=True):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal, diag=
diag, window=window, noisy=noisy)
self.wq = Linear(d_key, d_key, bias=use_wo)
self.wk = Linear(d_key, d_key, bias=use_wo)
self.wv = Linear(d_value, d_value, bias=use_wo)
self.wo = Linear(d_value, d_key, bias=use_wo)
self.gate = Linear(d_value // n_heads, 1)
self.use_wo = use_wo
self.n_heads = n_heads
def forward(self, query, key, inputs, mask=None, feedback=None, weights
=None, beta=0, tau=1):
query, key, value = self.wq(query), self.wk(key), self.wv(inputs)
B, Tq, D = query.size()
_, Tk, _ = key.size()
N = self.n_heads
probs = []
query, key, value = (x.contiguous().view(B, -1, N, D // N).
transpose(2, 1).contiguous().view(B * N, -1, D // N) for x in (
query, key, value))
if mask is not None:
mask = mask[:, None, :].expand(B, N, Tk).contiguous().view(B *
N, -1)
probs = self.attention(query, key, None, mask, probs, beta, tau,
weights)
mix = matmul(self.attention.dropout(probs), value).contiguous().view(B,
N, -1, D // N).transpose(2, 1).contiguous()
mix = softmax(self.gate(mix))
probs = (probs.contiguous().view(B, N, Tq, Tk).transpose(2, 1) * mix
).sum(-2)
outputs = matmul(probs, inputs)
return self.wo(outputs)
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_key': 4, 'd_value': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
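# Usage sketch (illustrative): shapes follow get_inputs()/get_init_inputs().
# Dropout is active in train mode, so .eval() makes the forward deterministic.
def _example_moe_head():
    head = MoEHead(d_key=4, d_value=4, n_heads=4, drop_ratio=0.5).eval()
    query, key, inputs = get_inputs()
    return head(query, key, inputs)  # shape (4, 4, 4)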
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch.nn import functional as F
from torch.autograd import Variable
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
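    # Blends the four per-head score maps (in_ptr0, head-major in blocks of
    # 16 within each batch of 64 elements) with the softmaxed gate weights
    # (in_ptr1) and reduces over the head axis.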
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (1, 1), (1, 1))
assert_size_stride(primals_11, (1,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_9, buf6, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(buf5, reinterpret_tensor(buf6, (16, 4, 1), (4, 1,
0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf7, (64, 1), (1, 1), 0)
del buf7
extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (64, 1),
(1, 0), 0), primals_10, alpha=1, beta=1, out=buf10)
del primals_11
buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0)
del buf11
triton_poi_fused__softmax_mul_sum_4[grid(64)](buf5, buf12, buf13,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
del buf12
extern_kernels.bmm(buf13, primals_7, out=buf14)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf15)
del primals_13
return reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0
), primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf8, (64, 1), (1, 1), 0
), buf10, reinterpret_tensor(buf14, (16, 4), (4, 1), 0
), primals_12, primals_10, reinterpret_tensor(buf6, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0)
def softmax(x):
    # dim arguments made explicit to match the legacy implicit-dim choice
    # for the 2D/3D/4D inputs used in this module
    if x.dim() == 3:
        return F.softmax(x.transpose(0, 2), dim=0).transpose(0, 2)
    return F.softmax(x, dim=1)
# TINY and INF were undefined in the extracted snippet; the values below are
# the conventional small/large constants from the source repo (assumed).
TINY = 1e-09
INF = 1e+10
def gumbel_softmax(input, beta=0.5, tau=1.0):
    noise = input.data.new(*input.size()).uniform_()
    noise.add_(TINY).log_().neg_().add_(TINY).log_().neg_()
    return softmax((input + beta * Variable(noise)) / tau)
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-1)).squeeze(-1)
def mask(targets, out, input_mask=None, return_mask=False):
    if input_mask is None:
        input_mask = targets != 1
    out_mask = input_mask.unsqueeze(-1).expand_as(out)
    if return_mask:
        # the original referenced an undefined `the_mask`; returning the
        # computed input_mask is the assumed intent
        return targets[input_mask], out[out_mask].view(-1, out.size(-1)), input_mask
    return targets[input_mask], out[out_mask].view(-1, out.size(-1))
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal, diag=False, window=-1,
noisy=False):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
self.diag = diag
self.window = window
self.noisy = noisy
def forward(self, query, key, value=None, mask=None, feedback=None,
beta=0, tau=1, weights=None):
dot_products = matmul(query, key.transpose(1, 2))
if weights is not None:
dot_products = dot_products + weights
if query.dim() == 3 and self.causal and query.size(1) == key.size(1):
tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
dot_products.data.sub_(tri.unsqueeze(0))
if self.window > 0:
window_mask = key.data.new(key.size(1), key.size(1)).fill_(1)
window_mask = (window_mask.triu(self.window + 1) + window_mask.
tril(-self.window - 1)) * INF
dot_products.data.sub_(window_mask.unsqueeze(0))
if self.diag:
inds = torch.arange(0, key.size(1)).long().view(1, 1, -1)
            if key.is_cuda:
                inds = inds.cuda()  # restore the device move; the extracted line was a no-op
dot_products.data.scatter_(1, inds.expand(dot_products.size(0),
1, inds.size(-1)), -INF)
if mask is not None:
if dot_products.dim() == 2:
dot_products.data -= (1 - mask) * INF
else:
dot_products.data -= (1 - mask[:, None, :]) * INF
if value is None:
return dot_products
logits = dot_products / self.scale
if not self.noisy:
probs = softmax(logits)
else:
probs = gumbel_softmax(logits, beta=beta, tau=tau)
if feedback is not None:
feedback.append(probs.contiguous())
return matmul(self.dropout(probs), value)
class MoEHeadNew(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False,
diag=False, window=-1, noisy=False, use_wo=True):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal, diag=
diag, window=window, noisy=noisy)
self.wq = Linear(d_key, d_key, bias=use_wo)
self.wk = Linear(d_key, d_key, bias=use_wo)
self.wv = Linear(d_value, d_value, bias=use_wo)
self.wo = Linear(d_value, d_key, bias=use_wo)
self.gate = Linear(d_value // n_heads, 1)
self.use_wo = use_wo
self.n_heads = n_heads
def forward(self, input_0, input_1, input_2):
primals_2 = self.wq.weight
primals_3 = self.wq.bias
primals_5 = self.wk.weight
primals_6 = self.wk.bias
primals_8 = self.wv.weight
primals_9 = self.wv.bias
primals_12 = self.wo.weight
primals_13 = self.wo.bias
primals_10 = self.gate.weight
primals_11 = self.gate.bias
primals_1 = input_0
primals_4 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
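# Sketch of the head-mixing step that triton_poi_fused__softmax_mul_sum_4
# fuses: the per-head score maps (raw dot products here, since the attention
# is called with value=None) are blended with the softmaxed gate weights and
# reduced over the head axis before a single bmm against the raw inputs.
def _mix_heads_reference(dot_products, gate):
    # dot_products: (B*N, Tq, Tk) head-major; gate: (B, Tq, N, 1) softmaxed
    B, N = gate.shape[0], gate.shape[2]
    Tq, Tk = dot_products.shape[1], dot_products.shape[2]
    per_head = dot_products.view(B, N, Tq, Tk).transpose(2, 1)  # (B, Tq, N, Tk)
    return (per_head * gate).sum(-2)  # (B, Tq, Tk)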
| cclauss/nonauto-nmt | MoEHead | false | 15,014 | [
"BSD-3-Clause"
]
| 262 | efcbe4f2329b140ac3ce06abb6409457cebc8e49 | https://github.com/cclauss/nonauto-nmt/tree/efcbe4f2329b140ac3ce06abb6409457cebc8e49 |
InvertibleChannelMixing1D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/am/camznkwec2lbh6msqczu6byzeziacywdz5jsefl4uifgbyfrwx5b.py
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
# Source node to ATen node mapping:
# autograd_function_apply => add, eq, full_default, full_default_1, iota, sub_1, where
# skew_symmetric_matrix => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %permute), kwargs = {})
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %sub), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %sub), kwargs = {})
triton_poi_fused_add_eye_sub_0 = async_compile.triton('triton_poi_fused_add_eye_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + (4*y0)), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + (4*y0)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fv/cfvl2yytwjnvovj3pb564j4e36ftzegt465t5hi3f4s6v27gm42g.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %view, None, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_sub_0.run(primals_1, buf0, buf5, 4, 4, grid=grid(4, 4), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add, aten.linalg_lu_factor_ex]
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.linalg_lu_solve]
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf9, (1, 4, 4), (16, 4, 1))
del buf8
# Topologically Sorted Source Nodes: [], Original ATen: [aten.linalg_lu_solve]
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return (reinterpret_tensor(buf9, (4, 4), (4, 1), 0), buf5, reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 4), 0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
from torch import nn
from warnings import warn
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
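# Illustrative sketch (an addition for exposition, not part of the original
# module; helper name and tolerance are assumptions): for a skew-symmetric A,
# the Cayley transform computed by _cayley above, (I + A)^{-1}(I - A), is an
# orthogonal matrix.
def _demo_cayley_orthogonality():
    W = torch.randn(4, 4)
    A = W - W.transpose(-1, -2)  # skew-symmetric input
    Q = _cayley(A)
    # Q^T Q should be close to the identity up to float32 round-off.
    return torch.allclose(Q.transpose(-1, -2) @ Q, torch.eye(4), atol=1e-4)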
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
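# Worked example (values are hypothetical): the matrix 1-norm is the maximum
# absolute column sum, which the reduction above computes by summing |A| over
# dim -2 and taking the maximum over dim -1.
# >>> matrix_1_norm(torch.tensor([[1., -2.], [3., 4.]]))
# tensor(6.)  # column sums are |1|+|3| = 4 and |-2|+|4| = 6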
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
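# Sketch of how the thresholds act (helper name is an assumption): a float32
# matrix with 1-norm 0.1 falls below ell[3], so the m=3 Padé approximant is
# selected with no scaling (s = 0).
def _demo_compute_scales():
    A = 0.1 * torch.eye(3)  # 1-norm is 0.1
    s, m = _compute_scales(A)
    return s, m  # expected: (tensor(0.), 3)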
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For matrix M, the output is the same shape as M, if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
    This works both for the forward pass and for the derivative of
    the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
            temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
            L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
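# Minimal check of the squaring loop (a sketch; shapes and the uniform s are
# assumptions): with s = 2 for every batch element, no masking occurs and the
# loop computes (R^2)^2 = R^4.
def _demo_square():
    R = torch.eye(3) + 0.1 * torch.randn(2, 3, 3)
    s = torch.full((2,), 2.0)
    R_squared = _square(s, R)
    return torch.allclose(R_squared, R @ R @ R @ R, atol=1e-5)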
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
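# Finite-difference cross-check (a sketch; step size and tolerance are
# assumptions): the Fréchet derivative returned above should match
# (exp(A + h*E) - exp(A)) / h to first order in h.
def _demo_frechet_finite_difference(h=1e-4):
    A = 0.1 * torch.randn(3, 3, dtype=torch.float64)
    E = torch.randn(3, 3, dtype=torch.float64)
    dexp = _expm_frechet_scaling_squaring(A, E)
    fd = (_expm_scaling_squaring(A + h * E) - _expm_scaling_squaring(A)) / h
    return torch.allclose(dexp, fd, atol=1e-2)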
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
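# Cross-check sketch against PyTorch's built-in matrix exponential (tolerance
# is an assumption): torch.matrix_exp uses the same scaling-and-squaring idea,
# so the two results should agree closely.
def _demo_expm_matches_torch():
    A = 0.5 * torch.randn(4, 4)
    return torch.allclose(_expm_scaling_squaring(A), torch.matrix_exp(A),
                          atol=1e-4)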
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For matrix M, the output is the same shape as M, if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
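# Orthogonality sketch (shapes and tolerance are assumptions): each
# Householder factor I - 2uu^T is orthogonal, so the accumulated product
# should satisfy Q @ Q^T ≈ I up to the eps used in column normalization.
def _demo_householder_orthogonal():
    M = torch.randn(4, 4)
    Q = householder_transform(M)
    return torch.allclose(Q @ Q.transpose(-1, -2), torch.eye(4), atol=1e-4)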
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
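# Usage sketch for the autograd Functions above (weight scale and tolerance
# are assumptions): exponentiating a skew-symmetric matrix yields an
# orthogonal matrix, which is how the mixing layers below build their kernels.
def _demo_orthogonal_kernel_from_exp():
    weight = 0.5 * torch.randn(4, 4)
    Q = __calculate_kernel_matrix_exp__(weight)
    return torch.allclose(Q @ Q.transpose(-1, -2), torch.eye(4), atol=1e-4)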
class OrthogonalChannelMixing(nn.Module):
"""Base class for all orthogonal channel mixing layers.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(OrthogonalChannelMixing, self).__init__()
self.in_channels = in_channels
self.weight = nn.Parameter(torch.zeros((in_channels, in_channels)),
requires_grad=learnable)
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self.kwargs = kwargs
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel_matrix_transposed(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return torch.transpose(self.kernel_matrix, -1, -2)
class InvertibleChannelMixing1D(OrthogonalChannelMixing):
"""Orthogonal (and hence invertible) channel mixing layer for 1D data.
    This layer linearly combines the input channels into each output channel.
Here, the number of output channels is the same as the number of input
channels, and the matrix specifying the connectivity between the channels
is orthogonal.
:param in_channels:
The number of input (and output) channels.
:param method:
The chosen method for parametrizing the orthogonal matrix which
determines the orthogonal channel mixing. Either ``"exp"``, ``"cayley"``
or ``"householder"``.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(InvertibleChannelMixing1D, self).__init__(in_channels=
in_channels, method=method, learnable=learnable, **kwargs)
self.kwargs = kwargs
@property
def kernel(self):
return self.kernel_matrix.view(self.in_channels, self.in_channels, 1)
def forward(self, x):
return nn.functional.conv1d(x, self.kernel)
def inverse(self, x):
return nn.functional.conv_transpose1d(x, self.kernel)
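# Round-trip sketch (batch size, length, and tolerance are assumptions):
# because the 1x1 convolution kernel is orthogonal, forward followed by
# inverse should recover the input up to floating-point error.
def _demo_channel_mixing_roundtrip():
    layer = InvertibleChannelMixing1D(in_channels=4)
    with torch.no_grad():
        layer.weight.copy_(0.1 * torch.randn(4, 4))
        x = torch.randn(2, 4, 8)  # (batch, channels, length)
        x_rec = layer.inverse(layer(x))
    return torch.allclose(x, x_rec, atol=1e-4)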
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
from torch import nn
from warnings import warn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + 4 * y0), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + 4 * y0), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_sub_0[grid(4, 4)](primals_1, buf0, buf5, 4,
4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0)
del buf0
triton_poi_fused_convolution_1[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4), (16, 4, 1), 0), buf8, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf9, (1, 4, 4), (16, 4, 1))
del buf8
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return reinterpret_tensor(buf9, (4, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf11
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For matrix M, the output is the same shape as M, if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
    This works both for the forward pass and for the derivative of
    the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
            temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
            L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
    works both for (n,n)-tensors and batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For matrix M, the output is the same shape as M, if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalChannelMixing(nn.Module):
"""Base class for all orthogonal channel mixing layers.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(OrthogonalChannelMixing, self).__init__()
self.in_channels = in_channels
self.weight = nn.Parameter(torch.zeros((in_channels, in_channels)),
requires_grad=learnable)
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self.kwargs = kwargs
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel_matrix_transposed(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return torch.transpose(self.kernel_matrix, -1, -2)
class InvertibleChannelMixing1DNew(OrthogonalChannelMixing):
"""Orthogonal (and hence invertible) channel mixing layer for 1D data.
    This layer linearly combines the input channels into each output channel.
Here, the number of output channels is the same as the number of input
channels, and the matrix specifying the connectivity between the channels
is orthogonal.
:param in_channels:
The number of input (and output) channels.
:param method:
The chosen method for parametrizing the orthogonal matrix which
determines the orthogonal channel mixing. Either ``"exp"``, ``"cayley"``
or ``"householder"``.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(InvertibleChannelMixing1DNew, self).__init__(in_channels=
in_channels, method=method, learnable=learnable, **kwargs)
self.kwargs = kwargs
@property
def kernel(self):
return self.kernel_matrix.view(self.in_channels, self.in_channels, 1)
def inverse(self, x):
return nn.functional.conv_transpose1d(x, self.kernel)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| cetmann/iunets | InvertibleChannelMixing1D | false | 15,015 | [
"MIT"
]
| 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
InvertibleChannelMixing3D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/am/camznkwec2lbh6msqczu6byzeziacywdz5jsefl4uifgbyfrwx5b.py
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
# Source node to ATen node mapping:
# autograd_function_apply => add, eq, full_default, full_default_1, iota, sub_1, where
# skew_symmetric_matrix => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %permute), kwargs = {})
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %sub), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %sub), kwargs = {})
triton_poi_fused_add_eye_sub_0 = async_compile.triton('triton_poi_fused_add_eye_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + (4*y0)), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + (4*y0)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fv/cfvl2yytwjnvovj3pb564j4e36ftzegt465t5hi3f4s6v27gm42g.py
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv3d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %view, None, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_sub_0.run(primals_1, buf0, buf5, 4, 4, grid=grid(4, 4), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add, aten.linalg_lu_factor_ex]
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.linalg_lu_solve]
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf8, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf9, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
del buf8
# Topologically Sorted Source Nodes: [], Original ATen: [aten.linalg_lu_solve]
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return (reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf5, reinterpret_tensor(buf7, (4, 4, 1, 1, 1), (1, 4, 4, 4, 4), 0), reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
from torch import nn
from warnings import warn
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For matrix M, the output is the same shape as M, if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
    This works both for the forward pass and for the derivative of
    the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
            temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
            L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
    works both for (n,n)-tensors and batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For matrix M, the output is the same shape as M, if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalChannelMixing(nn.Module):
"""Base class for all orthogonal channel mixing layers.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(OrthogonalChannelMixing, self).__init__()
self.in_channels = in_channels
self.weight = nn.Parameter(torch.zeros((in_channels, in_channels)),
requires_grad=learnable)
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self.kwargs = kwargs
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel_matrix_transposed(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return torch.transpose(self.kernel_matrix, -1, -2)
class InvertibleChannelMixing3D(OrthogonalChannelMixing):
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(InvertibleChannelMixing3D, self).__init__(in_channels=
in_channels, method=method, learnable=learnable, **kwargs)
self.kwargs = kwargs
@property
def kernel(self):
return self.kernel_matrix.view(self.in_channels, self.in_channels,
1, 1, 1)
def forward(self, x):
return nn.functional.conv3d(x, self.kernel)
def inverse(self, x):
return nn.functional.conv_transpose3d(x, self.kernel)
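# Round-trip sketch for the 3D layer (input shape and tolerance are
# assumptions): the weight initializes to zeros, so the Cayley kernel starts
# as the identity, and forward followed by inverse recovers the input for any
# weight because the 1x1x1 kernel stays orthogonal.
def _demo_channel_mixing3d_roundtrip():
    layer = InvertibleChannelMixing3D(in_channels=4)
    x = torch.randn(2, 4, 4, 4, 4)  # (batch, channels, D, H, W)
    with torch.no_grad():
        x_rec = layer.inverse(layer(x))
    return torch.allclose(x, x_rec, atol=1e-4)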
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
from torch import nn
from warnings import warn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + 4 * y0), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + 4 * y0), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_sub_0[grid(4, 4)](primals_1, buf0, buf5, 4,
4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0)
del buf0
triton_poi_fused_convolution_1[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf8, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf9, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
del buf8
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, reinterpret_tensor(buf7, (4, 4, 1, 1, 1), (1, 4, 4, 4, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4,
1), 0), buf11
def _cayley(A):
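    # Solves (I + A) Q = I - A via one LU factorisation, i.e. computes the
    # Cayley transform Q = (I + A)^{-1} (I - A); for skew-symmetric A, Q is
    # orthogonal and I + A is always invertible.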
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
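# Illustrative sanity check (hypothetical helper, not part of the library):
# the Cayley transform maps skew-symmetric matrices to orthogonal ones, so
# Q @ Q^T should recover the identity.
def _demo_cayley_is_orthogonal():
    W = torch.randn(4, 4, dtype=torch.float64)
    Q = __calculate_kernel_matrix_cayley__(W)
    assert torch.allclose(Q @ Q.transpose(-1, -2), torch.eye(4, dtype=torch.float64), atol=1e-8)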
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
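# Illustrative cross-check (hypothetical helper, not part of the library):
# the induced 1-norm is the maximum absolute column sum, so this should
# agree with torch.linalg.matrix_norm with ord=1.
def _demo_matrix_1_norm():
    A = torch.randn(3, 5, 5)
    assert torch.allclose(matrix_1_norm(A), torch.linalg.matrix_norm(A, ord=1))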
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
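# Worked example (illustrative): for a float32 matrix with requires_grad=False
# and 1-norm 10.0, max_norm >= ell[5] (~1.8802), so m = 7 is chosen and
# s = ceil(log2(10.0 / 3.92572...)) = ceil(1.35) = 2, i.e. the matrix is
# scaled by 2**-2 before the Pade step and then squared twice afterwards.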
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
        indices = [1] * (len(R.shape) - 1)
for i in range(s_max):
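            # Matrices whose scaling budget is spent (i >= s) are multiplied
            # by the identity here, so further iterations leave them (and
            # their derivative L) unchanged.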
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
                temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
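# Illustrative cross-check (hypothetical helper, not part of the library):
# the scaling-and-squaring result should match PyTorch's built-in
# torch.matrix_exp on a well-conditioned input.
def _demo_expm_matches_builtin():
    A = torch.randn(4, 4, dtype=torch.float64)
    assert torch.allclose(_expm_scaling_squaring(A), torch.matrix_exp(A), atol=1e-8)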
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
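# Illustrative check (hypothetical helper): for a unit vector v, the
# Householder matrix H = I - 2 v v^T is symmetric and orthogonal, so H @ H = I.
def _demo_householder_involution():
    v = torch.randn(5, 1)
    v = v / v.norm()
    H = householder_matrix(v)
    assert torch.allclose(H @ H, torch.eye(5), atol=1e-6)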
def normalize_matrix_rows(matrix, eps=1e-06):
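    # Note: summing over dim=-2 yields one norm per *column*, so this
    # normalises the column vectors of `matrix` (each column later serves as
    # a Householder reflection direction in householder_transform).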
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
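# Illustrative usage (hypothetical helper, not part of the library): a
# product of Householder reflections is orthogonal, so Q @ Q^T recovers the
# identity (up to the eps used for normalisation).
def _demo_householder_transform():
    M = torch.randn(6, 3)
    Q = householder_transform(M, n_reflections=3)
    assert torch.allclose(Q @ Q.transpose(-1, -2), torch.eye(6), atol=1e-4)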
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalChannelMixing(nn.Module):
"""Base class for all orthogonal channel mixing layers.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(OrthogonalChannelMixing, self).__init__()
self.in_channels = in_channels
self.weight = nn.Parameter(torch.zeros((in_channels, in_channels)),
requires_grad=learnable)
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self.kwargs = kwargs
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel_matrix_transposed(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return torch.transpose(self.kernel_matrix, -1, -2)
class InvertibleChannelMixing3DNew(OrthogonalChannelMixing):
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(InvertibleChannelMixing3DNew, self).__init__(in_channels=
in_channels, method=method, learnable=learnable, **kwargs)
self.kwargs = kwargs
@property
def kernel(self):
return self.kernel_matrix.view(self.in_channels, self.in_channels,
1, 1, 1)
def inverse(self, x):
return nn.functional.conv_transpose3d(x, self.kernel)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| cetmann/iunets | InvertibleChannelMixing3D | false | 15,016 | ["MIT"] | 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
InvertibleChannelMixing2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/am/camznkwec2lbh6msqczu6byzeziacywdz5jsefl4uifgbyfrwx5b.py
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
# Source node to ATen node mapping:
# autograd_function_apply => add, eq, full_default, full_default_1, iota, sub_1, where
# skew_symmetric_matrix => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %permute), kwargs = {})
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %sub), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %sub), kwargs = {})
triton_poi_fused_add_eye_sub_0 = async_compile.triton('triton_poi_fused_add_eye_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + (4*y0)), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + (4*y0)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fv/cfvl2yytwjnvovj3pb564j4e36ftzegt465t5hi3f4s6v27gm42g.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_2, %view, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_sub_0.run(primals_1, buf0, buf5, 4, 4, grid=grid(4, 4), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add, aten.linalg_lu_factor_ex]
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.linalg_lu_solve]
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(primals_2, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
del buf8
# Topologically Sorted Source Nodes: [], Original ATen: [aten.linalg_lu_solve]
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return (buf9, primals_2, buf5, reinterpret_tensor(buf7, (4, 4, 1, 1), (1, 4, 4, 4), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
from torch import nn
from warnings import warn
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
        indices = [1] * (len(R.shape) - 1)
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
                temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalChannelMixing(nn.Module):
"""Base class for all orthogonal channel mixing layers.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(OrthogonalChannelMixing, self).__init__()
self.in_channels = in_channels
self.weight = nn.Parameter(torch.zeros((in_channels, in_channels)),
requires_grad=learnable)
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self.kwargs = kwargs
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel_matrix_transposed(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return torch.transpose(self.kernel_matrix, -1, -2)
class InvertibleChannelMixing2D(OrthogonalChannelMixing):
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(InvertibleChannelMixing2D, self).__init__(in_channels=
in_channels, method=method, learnable=learnable, **kwargs)
self.kwargs = kwargs
@property
def kernel(self):
return self.kernel_matrix.view(self.in_channels, self.in_channels, 1, 1
)
def forward(self, x):
return nn.functional.conv2d(x, self.kernel)
def inverse(self, x):
return nn.functional.conv_transpose2d(x, self.kernel)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
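# Illustrative check (hypothetical helper, not part of the module): since the
# kernel matrix is orthogonal, its transpose is its inverse, which is what
# makes `inverse` an exact inverse of `forward`.
def _demo_kernel_matrix_orthogonality():
    layer = InvertibleChannelMixing2D(in_channels=3, method='exp')
    with torch.no_grad():
        layer.weight.add_(0.1 * torch.randn(3, 3))
    K = layer.kernel_matrix
    assert torch.allclose(K @ K.transpose(-1, -2), torch.eye(3), atol=1e-5)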
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
from torch import nn
from warnings import warn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + 4 * y0), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + 4 * y0), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_sub_0[grid(4, 4)](primals_1, buf0, buf5, 4,
4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
triton_poi_fused_convolution_1[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
buf9 = extern_kernels.convolution(primals_2, buf8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
del buf8
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return buf9, primals_2, buf5, reinterpret_tensor(buf7, (4, 4, 1, 1), (1,
4, 4, 4), 0), buf11
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
        indices = [1] * (len(R.shape) - 1)
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
                temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalChannelMixing(nn.Module):
"""Base class for all orthogonal channel mixing layers.
"""
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(OrthogonalChannelMixing, self).__init__()
self.in_channels = in_channels
self.weight = nn.Parameter(torch.zeros((in_channels, in_channels)),
requires_grad=learnable)
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self.kwargs = kwargs
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel_matrix_transposed(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return torch.transpose(self.kernel_matrix, -1, -2)
class InvertibleChannelMixing2DNew(OrthogonalChannelMixing):
def __init__(self, in_channels: 'int', method: 'str'='cayley',
learnable: 'bool'=True, **kwargs):
super(InvertibleChannelMixing2DNew, self).__init__(in_channels=
in_channels, method=method, learnable=learnable, **kwargs)
self.kwargs = kwargs
@property
def kernel(self):
return self.kernel_matrix.view(self.in_channels, self.in_channels, 1, 1
)
def inverse(self, x):
return nn.functional.conv_transpose2d(x, self.kernel)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| cetmann/iunets | InvertibleChannelMixing2D | false | 15,017 | [
"MIT"
]
| 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
homo_Gauss_mloglike | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2q/c2qxo42aochwcwzi676d2yucrbk5mmnenw5iwds5anqfvmysfxto.py
# Topologically Sorted Source Nodes: [var, log_scale, sub, pow_2, neg, mul, truediv, sub_1, sub_2, neg_1], Original ATen: [aten.pow, aten.log, aten.sub, aten.neg, aten.mul, aten.div]
# Source node to ATen node mapping:
# log_scale => log
# mul => mul
# neg => neg
# neg_1 => neg_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# truediv => div
# var => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand, 2), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%expand,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %primals_2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_2,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 2), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %mul), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %log), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, 0.9189385332046727), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_2,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, %mul), kwargs = {})
triton_poi_fused_div_log_mul_neg_pow_sub_0 = async_compile.triton('triton_poi_fused_div_log_mul_neg_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_log_mul_neg_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_log_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr2 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = 0.0001
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = 2.0
tmp12 = tmp10 * tmp11
tmp13 = tmp4 / tmp12
tmp14 = tl_math.log(tmp9)
tmp15 = tmp13 - tmp14
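    # 0.9189385332046727 == 0.5 * log(2 * pi), the Gaussian normalisation term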
tmp16 = 0.9189385332046727
tmp17 = tmp15 - tmp16
tmp18 = -tmp17
tmp19 = tmp13 / tmp12
tl.store(out_ptr0 + (x0), tmp18, xmask)
tl.store(out_ptr1 + (x0), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [var, log_scale, sub, pow_2, neg, mul, truediv, sub_1, sub_2, neg_1], Original ATen: [aten.pow, aten.log, aten.sub, aten.neg, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_log_mul_neg_pow_sub_0.run(primals_3, primals_2, primals_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import torch.nn as nn
import torch.optim
from torch.distributions import Normal
class homo_Gauss_mloglike(nn.Module):
def __init__(self, Ndims=1, sig=None):
super(homo_Gauss_mloglike, self).__init__()
if sig is None:
self.log_std = nn.Parameter(torch.zeros(Ndims))
else:
self.log_std = nn.Parameter(torch.ones(Ndims) * np.log(sig),
requires_grad=False)
def forward(self, mu, y, model_std=None):
sig = self.log_std.exp().clamp(min=0.0001)
if model_std is not None:
sig = (sig ** 2 + model_std ** 2) ** 0.5
dist = Normal(mu, sig)
return -dist.log_prob(y)
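# Closed form of the value computed above, matching the constants baked into
# the fused Triton kernel:
#     -log N(y | mu, sigma) = (y - mu)^2 / (2 sigma^2) + log(sigma) + 0.5 * log(2 * pi)
# with 0.5 * log(2 * pi) ~= 0.9189385332046727 and sigma clamped at 1e-4.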
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_log_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = -tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = 0.0001
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = 2.0
tmp12 = tmp10 * tmp11
tmp13 = tmp4 / tmp12
tmp14 = tl_math.log(tmp9)
tmp15 = tmp13 - tmp14
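    # 0.9189385332046727 == 0.5 * log(2 * pi), the Gaussian normalisation term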
tmp16 = 0.9189385332046727
tmp17 = tmp15 - tmp16
tmp18 = -tmp17
tmp19 = tmp13 / tmp12
tl.store(out_ptr0 + x0, tmp18, xmask)
tl.store(out_ptr1 + x0, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_log_mul_neg_pow_sub_0[grid(256)](primals_3,
primals_2, primals_1, buf0, buf1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1, buf1
class homo_Gauss_mloglikeNew(nn.Module):
def __init__(self, Ndims=1, sig=None):
super(homo_Gauss_mloglikeNew, self).__init__()
if sig is None:
self.log_std = nn.Parameter(torch.zeros(Ndims))
else:
self.log_std = nn.Parameter(torch.ones(Ndims) * np.log(sig),
requires_grad=False)
def forward(self, input_0, input_1):
primals_1 = self.log_std
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| chelsealuisa/DUN | homo_Gauss_mloglike | false | 15,018 | [
"MIT"
]
| 58 | 1ccd9bc49b91b13089350f003a25bdb11003d843 | https://github.com/chelsealuisa/DUN/tree/1ccd9bc49b91b13089350f003a25bdb11003d843 |
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lh/clhtaboxxs526aw4bqcb7s6xoig5vzwco55tfg6waaga3ao3elgd.py
# Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm]
# Source node to ATen node mapping:
# euclidean_distance => add, pow_1, pow_2, sub, sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused_add_norm_sub_0 = async_compile.triton('triton_poi_fused_add_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + (x0), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/eo/ceoal6iazsps7kf3y6tkqeelm2cum5e75gcguab7wpsfi6ftu5cm.py
# Topologically Sorted Source Nodes: [pow_1, mul, sub, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.clamp, aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add_1
# clamp => clamp_min
# loss_contrastive => mean
# mul => mul
# mul_1 => mul_1
# pow_1 => pow_3
# pow_2 => pow_4
# sub => sub_1
# sub_1 => sub_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %pow_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_clamp_mean_mul_pow_rsub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
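    # 2.0 below is the default contrastive margin, constant-folded at compile time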
tmp6 = 2.0
tmp7 = tmp6 - tmp1
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp5 * tmp10
tmp12 = tmp3 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm]
stream0 = get_raw_stream(0)
triton_poi_fused_add_norm_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [pow_1, mul, sub, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.clamp, aten.add, aten.mean]
triton_per_fused_add_clamp_mean_mul_pow_rsub_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class ContrastiveLoss(nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2)
loss_contrastive = torch.mean(label * torch.pow(euclidean_distance,
2) + (1 - label) * torch.pow(torch.clamp(self.margin -
euclidean_distance, min=0.0), 2))
return loss_contrastive
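# The quantity above is the Hadsell-Chopra-LeCun contrastive loss
#     L = mean( y * d^2 + (1 - y) * max(margin - d, 0)^2 )
# with d = ||x1 - x2||_2 (F.pairwise_distance adds eps = 1e-6) and the default
# margin of 2.0 -- the constants visible in the fused Triton kernels below.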
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
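    # 2.0 below is the default contrastive margin, constant-folded at compile time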
tmp6 = 2.0
tmp7 = tmp6 - tmp1
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp5 * tmp10
tmp12 = tmp3 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| chenyanghungry/person-reid-lib | ContrastiveLoss | false | 15,019 | [
"MIT"
]
| 81 | 783e66c9bfedf582e2cf935b9f5be960b543ac3c | https://github.com/chenyanghungry/person-reid-lib/tree/783e66c9bfedf582e2cf935b9f5be960b543ac3c |
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f4/cf4n4twxm5q5eh5lcjfpxlmjdbgr6xfkpjnkedxdctexycbqar7w.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (16, 8), (8, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 16), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf4, 1024, grid=grid(1024), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf3)
del primals_6
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(buf2, (64, 16), (16, 1), 0), primals_5, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
class MLP(nn.Module):
def __init__(self, input_size, output_size, hidden_size=None, dropout=0.1):
super().__init__()
if hidden_size is None:
hidden_size = input_size * 4
self.w_1 = nn.Linear(input_size * 2, hidden_size)
self.w_2 = nn.Linear(hidden_size, output_size)
self.dropout = nn.Dropout(dropout)
def forward(self, x1, x2):
x = torch.cat([x1, x2], -1)
return self.w_2(self.dropout(torch.nn.functional.relu(self.w_1(x))))
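# Shape sketch (illustrative, not from the original source): with input_size=4
# and output_size=4, the inputs are concatenated to width 8, lifted to
# hidden_size=16 by w_1, and projected back to 4 by w_2:
#     mlp = MLP(input_size=4, output_size=4)
#     mlp(torch.rand(2, 4), torch.rand(2, 4)).shape  # -> torch.Size([2, 4])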
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (16, 8), (8, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 16), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(1024)](buf2,
primals_4, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(
buf2, (64, 16), (16, 1), 0), primals_5, buf4
class MLPNew(nn.Module):
def __init__(self, input_size, output_size, hidden_size=None, dropout=0.1):
super().__init__()
if hidden_size is None:
hidden_size = input_size * 4
self.w_1 = nn.Linear(input_size * 2, hidden_size)
self.w_2 = nn.Linear(hidden_size, output_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1):
primals_3 = self.w_1.weight
primals_4 = self.w_1.bias
primals_5 = self.w_2.weight
primals_6 = self.w_2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| chenyangh/tensor2struct-public | MLP | false | 15,020 | [
"MIT"
]
| 69 | d3257cba6d76d3c658a58a78f687d986bdc755cf | https://github.com/chenyangh/tensor2struct-public/tree/d3257cba6d76d3c658a58a78f687d986bdc755cf |
InvertibleDownsampling2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ie/cievhzbypd2barockl7zctjvhchqmubjtb53zimkcicmeo57etun.py
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
# Source node to ATen node mapping:
# autograd_function_apply => add, eq, full_default, full_default_1, iota, sub_1, where
# skew_symmetric_matrix => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %permute), kwargs = {})
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %sub), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %sub), kwargs = {})
triton_poi_fused_add_eye_sub_0 = async_compile.triton('triton_poi_fused_add_eye_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 4
x2 = xindex
y3 = yindex
y1 = (yindex // 4)
tmp6 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp0 = y0
tmp1 = x2
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x2 + (4*y3)), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x2 + (4*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vq/cvqpprnukykv7fb6t2uveui44qrapemorby5j3fnnfeymwpqwe63.py
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# reshape => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%linalg_lu_solve,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_sub_0.run(primals_1, buf0, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add, aten.linalg_lu_factor_ex]
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.linalg_lu_solve]
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(primals_2, reinterpret_tensor(buf8, (16, 1, 2, 2), (4, 0, 2, 1), 0), stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf9, (4, 16, 2, 2), (64, 4, 2, 1))
# Topologically Sorted Source Nodes: [], Original ATen: [aten.linalg_lu_solve]
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
del buf7
buf11 = buf10
del buf10
return (buf9, primals_2, buf5, reinterpret_tensor(buf8, (16, 1, 2, 2), (4, 4, 2, 1), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_2_t
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
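# For skew-symmetric A, the Cayley map Q = (I + A)^{-1} (I - A) computed here
# is orthogonal. Minimal check (sketch, not part of the original module):
#     A = torch.randn(4, 4); A = A - A.T
#     Q = _cayley(A)
#     torch.allclose(Q.T @ Q, torch.eye(4), atol=1e-5)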
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
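# Worked example: the 1-norm is the maximum absolute column sum, so for
#     A = torch.tensor([[1., -2.], [3., 4.]])
# the column sums are |1| + |3| = 4 and |-2| + |4| = 6, hence matrix_1_norm(A) == 6.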
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
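# Worked example (float32, no grad): theta_7 ~= 3.9257, so a matrix with
# 1-norm 10 selects m = 7 and s = ceil(log2(10 / 3.9257)) = ceil(1.35) = 2,
# i.e. A is scaled by 2**-2 before the Pade step and squared twice afterwards.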
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
                temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
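# Example of the masking above: with s = [1, 2], pass i = 0 squares both
# matrices, while pass i = 1 substitutes the identity for the finished first
# matrix and squares the second again -- each R_k ends up as R_k**(2**s_k).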
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
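# Usage sketch (names are illustrative, not from the original module):
#   >>> A = torch.randn(3, 3, dtype=torch.float64)
#   >>> E = torch.randn(3, 3, dtype=torch.float64)  # direction of differentiation
#   >>> dexp = _expm_frechet_scaling_squaring(A, E)  # d/dt exp(A + t*E) at t=0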
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
    works both for (n,n)-tensors and batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
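# Consistency sketch (hedged): for well-conditioned inputs the result should
# agree with PyTorch's built-in matrix exponential up to numerical tolerance.
#   >>> A = torch.randn(4, 4, dtype=torch.float64)
#   >>> torch.allclose(_expm_scaling_squaring(A), torch.matrix_exp(A), atol=1e-10)
#   True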
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For an (n,n)-matrix M, the output is an (n,n) identity matrix. If M is
    a batch of m matrices (i.e. an (m,n,n)-tensor), the output is a batch
    of m (n,n) identity matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
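# Property sketch: for a unit column vector v, H = I - 2 v v^T is orthogonal
# and reflects v to -v (assuming v really has unit norm).
#   >>> v = torch.tensor([[0.6], [0.8]])
#   >>> H = householder_matrix(v)
#   >>> torch.allclose(H @ v, -v)
#   True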
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
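# Note (sketch): a product of Householder reflections is orthogonal, so the
# returned matrix Q satisfies Q @ Q.T ~ I up to floating-point error;
# n_reflections trades the expressiveness of the parametrisation against
# compute cost.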
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
For some chosen method of parametrizing orthogonal matrices, this
    function outputs the weights required to represent a chosen
    initialization as a PyTorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
        Should be 'exp', 'cayley' or 'householder' (the latter currently
        falls back to random initialization).
init : The matrix which should be represented. Should be
'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
        possible if every entry of ``stride`` equals 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
if init == 'haar' and set(stride) != {2}:
        # Haar initialization requires every stride entry to equal 2;
        # falling back to the 'squeeze' initialization instead.
init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
weight = np.array([[[0, p, p, 0, p, 0, 0, 0], [0, 0, 0, p,
0, p, 0, 0], [0, 0, 0, p, 0, 0, p, 0], [0, 0, 0, 0, 0,
0, 0, p], [0, 0, 0, 0, 0, p, p, 0], [0, 0, 0, 0, 0, 0,
0, p], [0, 0, 0, 0, 0, 0, 0, p], [0, 0, 0, 0, 0, 0, 0,
0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p], [0, 0,
0, -p, 0, -p, p - 1, 0], [0, 0, 0, -p, 0, p - 1, -p, 0],
[0, 0, 0, 0, 1 - p, 0, 0, -p], [0, 0, 0, 0, 0, -p, -p,
0], [0, 0, 0, 0, 0, 0, 0, -p], [0, 0, 0, 0, 0, 0, 0, -p
], [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
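# Usage sketch (values follow from the 'cayley'/'haar' branch above):
#   >>> w = __initialize_weight__((8, 4, 4), stride=(2, 2), method='cayley', init='haar')
#   >>> w.shape
#   torch.Size([8, 4, 4])
# After skew-symmetrisation, the Cayley transform of w is intended to
# reproduce the 2D Haar downsampling matrix.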
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
The channel multiplier, i.e. the number
        by which the number of channels is multiplied (downsampling)
or divided (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
class InvertibleDownsampling2D(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_2_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_pair(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels * channel_multiplier
super(InvertibleDownsampling2D, self).__init__(*args,
low_channel_number=self.in_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def forward(self, x):
return F.conv2d(x, self.kernel, stride=self.stride, groups=self.
low_channel_number)
def inverse(self, x):
return F.conv_transpose2d(x, self.kernel, stride=self.stride,
groups=self.low_channel_number)
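# Round-trip sketch (hedged): because the kernel matrix is orthogonal, the
# transposed convolution in `inverse` should undo the strided convolution
# in `forward` up to numerical tolerance.
#   >>> layer = InvertibleDownsampling2D(in_channels=4)
#   >>> x = torch.rand(1, 4, 8, 8)
#   >>> torch.allclose(layer.inverse(layer(x)), x, atol=1e-5)
#   True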
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_2_t
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 4
x2 = xindex
y3 = yindex
y1 = yindex // 4
tmp6 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp0 = y0
tmp1 = x2
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x2 + 4 * y3), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_sub_0[grid(16, 4)](primals_1, buf0, buf5,
16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = buf0
del buf0
triton_poi_fused_clone_1[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = extern_kernels.convolution(primals_2, reinterpret_tensor(
buf8, (16, 1, 2, 2), (4, 0, 2, 1), 0), stride=(2, 2), padding=(
0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf9, (4, 16, 2, 2), (64, 4, 2, 1))
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
del buf7
buf11 = buf10
del buf10
return buf9, primals_2, buf5, reinterpret_tensor(buf8, (16, 1, 2, 2), (
4, 4, 2, 1), 0), buf11
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
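# Property sketch: for skew-symmetric A, the Cayley transform
# Q = (I + A)^{-1} (I - A) computed above is orthogonal.
#   >>> A = torch.randn(3, 3); A = A - A.T
#   >>> Q = _cayley(A)
#   >>> torch.allclose(Q @ Q.T, torch.eye(3), atol=1e-5)
#   True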
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
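# Note: the expression above evaluates 2 * (I - A)^{-1} @ H @ (I + A)^{-1} @ Q,
# which is what cayley.backward returns as the gradient with respect to M.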
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
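# Worked example: the 1-norm is the maximum absolute column sum.
#   >>> matrix_1_norm(torch.tensor([[1., -2.], [3., 4.]]))
#   tensor(6.)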
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
            ell = {3: 0.010813385777848, 5: 0.199806320697895,
                7: 0.783460847296204, 9: 1.782448623969279,
                13: 4.740307543765127}
else:
            ell = {3: 0.014955852179582, 5: 0.253939833006323,
                7: 0.950417899616293, 9: 2.097847961257068,
                13: 5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
            ell = {3: 0.30803304184533, 5: 1.482532614793145,
                7: 3.248671755200478}
else:
            ell = {3: 0.425873001692283, 5: 1.880152677804762,
                7: 3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
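# Reading the outputs (sketch): s holds the per-matrix number of squarings
# (zero wherever the 1-norm already falls below the bound for the chosen
# Padé order), while m is a single Padé order shared by the whole batch,
# picked as the smallest order whose error bound covers the largest norm.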
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For an (n,n)-matrix M, the output is an (n,n) identity matrix. If M is
    a batch of m matrices (i.e. an (m,n,n)-tensor), the output is a batch
    of m (n,n) identity matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
        indices = [1] * (len(R.shape) - 1)
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
            temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
            L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
    works both for (n,n)-tensors and batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For an (n,n)-matrix M, the output is an (n,n) identity matrix. If M is
    a batch of m matrices (i.e. an (m,n,n)-tensor), the output is a batch
    of m (n,n) identity matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
For some chosen method of parametrizing orthogonal matrices, this
    function outputs the weights required to represent a chosen
    initialization as a PyTorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
        Should be 'exp', 'cayley' or 'householder' (the latter currently
        falls back to random initialization).
init : The matrix which should be represented. Should be
'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
        possible if every entry of ``stride`` equals 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
if init == 'haar' and set(stride) != {2}:
        # Haar initialization requires every stride entry to equal 2;
        # falling back to the 'squeeze' initialization instead.
init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
weight = np.array([[[0, p, p, 0, p, 0, 0, 0], [0, 0, 0, p,
0, p, 0, 0], [0, 0, 0, p, 0, 0, p, 0], [0, 0, 0, 0, 0,
0, 0, p], [0, 0, 0, 0, 0, p, p, 0], [0, 0, 0, 0, 0, 0,
0, p], [0, 0, 0, 0, 0, 0, 0, p], [0, 0, 0, 0, 0, 0, 0,
0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p], [0, 0,
0, -p, 0, -p, p - 1, 0], [0, 0, 0, -p, 0, p - 1, -p, 0],
[0, 0, 0, 0, 1 - p, 0, 0, -p], [0, 0, 0, 0, 0, -p, -p,
0], [0, 0, 0, 0, 0, 0, 0, -p], [0, 0, 0, 0, 0, 0, 0, -p
], [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
The channel multiplier, i.e. the number
        by which the number of channels is multiplied (downsampling)
or divided (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
class InvertibleDownsampling2DNew(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_2_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_pair(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels * channel_multiplier
super(InvertibleDownsampling2DNew, self).__init__(*args,
low_channel_number=self.in_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def inverse(self, x):
return F.conv_transpose2d(x, self.kernel, stride=self.stride,
groups=self.low_channel_number)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| cetmann/iunets | InvertibleDownsampling2D | false | 15,021 | [
"MIT"
]
| 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [contiguous_4], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_4 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/q2/cq2bhrixv5xavijmo4bsnrvtws5kwvqhdummdlzij7fvpl7fcfb5.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, clone_3, exp, sub
# Graph fragment:
# %clone_3 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_3, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
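# Reading (hedged): the kernel above forms the numerator of a numerically
# stable softmax over scaled attention scores, exp(0.5 * x - max(0.5 * row));
# the 0.5 factor is consistent with a 1/sqrt(d_k) attention temperature for
# d_k = 4.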
# kernel path: runs/run_shard_0/inductor_cache/tr/ctrs4vqt4xt2a3fv3xpydavepoettmr7bp4kgam42ym3c6rbkdmc.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (y1 + (4*x2) + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + ((4*x2) + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x2) + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x2) + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x2) + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (y0 + (16*x2) + (64*y1)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py
# Topologically Sorted Source Nodes: [contiguous_11], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_11 => clone_6
# Graph fragment:
# %clone_6 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ax/cax3pk6x6l5syqxkefdzecacex2ylon7jzg2mhmvxe34ljwa4ruv.py
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => var
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_21), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True})
triton_poi_fused_add_mean_std_4 = async_compile.triton('triton_poi_fused_add_mean_std_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_std_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + (x0), tmp29, xmask)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/y3/cy32znxw2njhr6yc77xwtlvqfalviu7qwl3inf6ltf7qaej3eo3k.py
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_1, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# mean => mean
# mul => mul
# std => sqrt
# sub => sub_1
# truediv_1 => div_2
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_21), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_10, %sub_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, %primals_11), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_5 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
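# Reading (hedged): this fused kernel corresponds to the residual LayerNorm
# expression weight * (x + attn_out - mean) / (std + 1e-6) + bias, where mean
# and the unbiased std are taken over the last dimension (see the preceding
# mean/std kernel, which divides the squared deviations by 3.0 = n - 1).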
# kernel path: runs/run_shard_0/inductor_cache/up/cupeegv2vkfe3eb3ppbyhdjondrlqhr7i3aqig433xvb4hbhlnii.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_23,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5j/c5jq4slay2gzyh6whguivh7mf6amdb7fclvyghyk5gdqukw7fkho.py
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_3 => add_3
# Graph fragment:
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_25), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/s3/cs3p3gyr7za5k6qu56xvzqdhttlsg5uumj34efqrojvxd5l6k4aa.py
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_2, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add_4 => add_4
# add_5 => add_5
# mean_2 => mean_1
# mul_1 => mul_1
# std_2 => sqrt_1, var_1
# sub_1 => sub_2
# truediv_2 => div_3
# Graph fragment:
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_3, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_3, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %mean_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_16, %sub_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-06), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_3, %primals_17), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_8 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_4], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [contiguous_6], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 16), (1, 4, 16), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 64, 4, grid=grid(64, 4), stream=stream0)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [contiguous_8], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (1, 16, 64), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_11], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_9
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = buf12; del buf12 # reuse
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
triton_poi_fused_add_mean_std_4.run(buf13, primals_1, buf11, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_1, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_5.run(primals_10, primals_1, buf11, buf14, buf13, primals_11, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_11
buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf16)
buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0); del buf16 # reuse
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_6.run(buf17, primals_13, buf21, 64, grid=grid(64), stream=stream0)
del primals_13
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf19, buf15, primals_15, 64, grid=grid(64), stream=stream0)
del primals_15
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_2, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_8.run(primals_16, buf19, primals_17, buf20, 64, grid=grid(64), stream=stream0)
del primals_17
return (buf20, primals_1, primals_10, primals_16, buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(buf17, (16, 4), (4, 1), 0), buf19, primals_14, buf21, primals_12, primals_8, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch.nn import functional as F
from torch.autograd import Variable
from torch import nn
INF = 1e+10  # large masking constant; assumed value, its definition was lost in extraction
TINY = 1e-09  # small epsilon guarding log(0); assumed value, its definition was lost in extraction
def softmax(x):
    # explicit dims reproduce the legacy implicit-softmax behavior for the
    # 2-D/3-D logits used in this module
    if x.dim() == 3:
        return F.softmax(x.transpose(0, 2), dim=0).transpose(0, 2)
    return F.softmax(x, dim=-1)
def gumbel_softmax(input, beta=0.5, tau=1.0):
    noise = input.data.new(*input.size()).uniform_()
    noise.add_(TINY).log_().neg_().add_(TINY).log_().neg_()
    return softmax((input + beta * Variable(noise)) / tau)
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-1)).squeeze(-1)
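# Illustrative shapes for matmul (not in the original source):
#   x: (B, T, D), y: (B, D, E) -> x @ y                              : (B, T, E)
#   x: (B, D),    y: (B, D, E) -> (x.unsqueeze(-2) @ y).squeeze(-2)  : (B, E)
#   x: (B, T, D), y: (B, D)    -> (x @ y.unsqueeze(-1)).squeeze(-1)  : (B, T)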
def mask(targets, out, input_mask=None, return_mask=False):
    if input_mask is None:
        input_mask = targets != 1
    out_mask = input_mask.unsqueeze(-1).expand_as(out)
    if return_mask:
        # the extracted source returned an undefined `the_mask`; returning
        # `out_mask` is an assumption that keeps this path runnable
        return targets[input_mask], out[out_mask].view(-1, out.size(-1)), out_mask
    return targets[input_mask], out[out_mask].view(-1, out.size(-1))
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
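# Note: Tensor.std defaults to the unbiased estimator (Bessel's correction),
# which is why the fused layer-norm kernels above divide the summed squared
# deviations by 3.0 (= d_model - 1) for d_model = 4. A minimal equivalence
# sketch (illustrative, not part of the original source):
#   ln = LayerNorm(4); x = torch.rand(4, 4, 4)
#   ref = ln.gamma * (x - x.mean(-1, keepdim=True)) / (x.std(-1, keepdim=True) + ln.eps) + ln.beta
#   torch.allclose(ln(x), ref)  # True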
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio, pos=0):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
self.pos = pos
def forward(self, *x):
return self.layernorm(x[self.pos] + self.dropout(self.layer(*x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal, diag=False, window=-1,
noisy=False):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
self.diag = diag
self.window = window
self.noisy = noisy
def forward(self, query, key, value=None, mask=None, feedback=None,
beta=0, tau=1, weights=None):
dot_products = matmul(query, key.transpose(1, 2))
if weights is not None:
dot_products = dot_products + weights
if query.dim() == 3 and self.causal and query.size(1) == key.size(1):
tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
dot_products.data.sub_(tri.unsqueeze(0))
if self.window > 0:
window_mask = key.data.new(key.size(1), key.size(1)).fill_(1)
window_mask = (window_mask.triu(self.window + 1) + window_mask.
tril(-self.window - 1)) * INF
dot_products.data.sub_(window_mask.unsqueeze(0))
if self.diag:
inds = torch.arange(0, key.size(1)).long().view(1, 1, -1)
            if key.is_cuda:
                inds = inds.to(key.device)  # restored device move; the extraction left a no-op `inds = inds` here
dot_products.data.scatter_(1, inds.expand(dot_products.size(0),
1, inds.size(-1)), -INF)
if mask is not None:
if dot_products.dim() == 2:
dot_products.data -= (1 - mask) * INF
else:
dot_products.data -= (1 - mask[:, None, :]) * INF
if value is None:
return dot_products
logits = dot_products / self.scale
if not self.noisy:
probs = softmax(logits)
else:
probs = gumbel_softmax(logits, beta=beta, tau=tau)
if feedback is not None:
feedback.append(probs.contiguous())
return matmul(self.dropout(probs), value)
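# With d_key = 4 the scale is sqrt(4) = 2, so the compiled softmax kernel
# (triton_poi_fused__softmax_1) folds the division into a 0.5 multiply
# before the max-subtraction and exponentiation.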
class MultiHead2(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False,
diag=False, window=-1, noisy=False, use_wo=True):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal, diag=
diag, window=window, noisy=noisy)
self.wq = Linear(d_key, d_key, bias=use_wo)
self.wk = Linear(d_key, d_key, bias=use_wo)
self.wv = Linear(d_value, d_value, bias=use_wo)
if use_wo:
self.wo = Linear(d_value, d_key, bias=use_wo)
self.use_wo = use_wo
self.n_heads = n_heads
def forward(self, query, key, value, mask=None, feedback=None, weights=
None, beta=0, tau=1):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
B, Tq, D = query.size()
_, Tk, _ = key.size()
N = self.n_heads
probs = []
query, key, value = (x.contiguous().view(B, -1, N, D // N).
transpose(2, 1).contiguous().view(B * N, -1, D // N) for x in (
query, key, value))
if mask is not None:
mask = mask[:, None, :].expand(B, N, Tk).contiguous().view(B *
N, -1)
outputs = self.attention(query, key, value, mask, probs, beta, tau,
weights)
outputs = outputs.contiguous().view(B, N, -1, D // N).transpose(2, 1
).contiguous().view(B, -1, D)
if feedback is not None:
feedback.append(probs[0].view(B, N, Tq, Tk))
if self.use_wo:
return self.wo(outputs)
return outputs
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = Linear(d_model, d_hidden)
self.linear2 = Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
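# Note: the compiled graph fuses the linear1 bias add with the ReLU and also
# materializes the (activation <= 0) boolean mask (buf21) that ReLU's
# autograd backward (threshold_backward) consumes.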
class EncoderLayer(nn.Module):
def __init__(self, args):
super().__init__()
self.selfattn = ResidualBlock(MultiHead2(args.d_model, args.d_model,
args.n_heads, args.drop_ratio, use_wo=args.use_wo), args.
d_model, args.drop_ratio)
self.feedforward = ResidualBlock(FeedForward(args.d_model, args.
d_hidden), args.d_model, args.drop_ratio)
def forward(self, x, mask=None):
return self.feedforward(self.selfattn(x, x, x, mask))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(d_model=4, n_heads=4, drop_ratio=0.5,
use_wo=4, d_hidden=4)}]
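# Minimal smoke test for the reference module (illustrative):
#   args = _mock_config(d_model=4, n_heads=4, drop_ratio=0.5, use_wo=4, d_hidden=4)
#   layer = EncoderLayer(args)
#   layer(torch.rand(4, 4, 4)).shape  # -> torch.Size([4, 4, 4])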
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch.nn import functional as F
from torch.autograd import Variable
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
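# The kernels below are the same fused ops emitted above, minus the inductor
# provenance comments: clone_0 adds a bias while materializing the head-split
# layout, softmax_1/softmax_2 implement a numerically stable softmax in two
# passes (scale + max-subtract + exp, then normalize), and the *_std_sub
# kernels evaluate the LayerNorm expression elementwise.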
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (y1 + 4 * x2 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_std_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 16), (1, 4, 16), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(64, 4)](buf6, buf7, 64, 4, XBLOCK=
4, YBLOCK=32, num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (1, 16, 64),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_9
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = buf12
del buf12
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_std_4[grid(16)](buf13, primals_1, buf11,
buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_5[grid(64)](primals_10,
primals_1, buf11, buf14, buf13, primals_11, buf15, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_11
buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf16)
buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0)
del buf16
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(64)](buf17,
primals_13, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_7[grid(64)](buf19, buf15, primals_15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_15
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_8[grid(64)](primals_16,
buf19, primals_17, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_17
return buf20, primals_1, primals_10, primals_16, buf7, reinterpret_tensor(
buf10, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf15, (16, 4
), (4, 1), 0), reinterpret_tensor(buf17, (16, 4), (4, 1), 0
), buf19, primals_14, buf21, primals_12, primals_8, reinterpret_tensor(
buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0)
INF = 1e+10  # large masking constant; assumed value, its definition was lost in extraction
TINY = 1e-09  # small epsilon guarding log(0); assumed value, its definition was lost in extraction
def softmax(x):
    # explicit dims reproduce the legacy implicit-softmax behavior for the
    # 2-D/3-D logits used in this module
    if x.dim() == 3:
        return F.softmax(x.transpose(0, 2), dim=0).transpose(0, 2)
    return F.softmax(x, dim=-1)
def gumbel_softmax(input, beta=0.5, tau=1.0):
    noise = input.data.new(*input.size()).uniform_()
    noise.add_(TINY).log_().neg_().add_(TINY).log_().neg_()
    return softmax((input + beta * Variable(noise)) / tau)
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-1)).squeeze(-1)
def mask(targets, out, input_mask=None, return_mask=False):
    if input_mask is None:
        input_mask = targets != 1
    out_mask = input_mask.unsqueeze(-1).expand_as(out)
    if return_mask:
        # the extracted source returned an undefined `the_mask`; returning
        # `out_mask` is an assumption that keeps this path runnable
        return targets[input_mask], out[out_mask].view(-1, out.size(-1)), out_mask
    return targets[input_mask], out[out_mask].view(-1, out.size(-1))
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio, pos=0):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
self.pos = pos
def forward(self, *x):
return self.layernorm(x[self.pos] + self.dropout(self.layer(*x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal, diag=False, window=-1,
noisy=False):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
self.diag = diag
self.window = window
self.noisy = noisy
def forward(self, query, key, value=None, mask=None, feedback=None,
beta=0, tau=1, weights=None):
dot_products = matmul(query, key.transpose(1, 2))
if weights is not None:
dot_products = dot_products + weights
if query.dim() == 3 and self.causal and query.size(1) == key.size(1):
tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF
dot_products.data.sub_(tri.unsqueeze(0))
if self.window > 0:
window_mask = key.data.new(key.size(1), key.size(1)).fill_(1)
window_mask = (window_mask.triu(self.window + 1) + window_mask.
tril(-self.window - 1)) * INF
dot_products.data.sub_(window_mask.unsqueeze(0))
if self.diag:
inds = torch.arange(0, key.size(1)).long().view(1, 1, -1)
            if key.is_cuda:
                inds = inds.to(key.device)  # restored device move; the extraction left a no-op `inds = inds` here
dot_products.data.scatter_(1, inds.expand(dot_products.size(0),
1, inds.size(-1)), -INF)
if mask is not None:
if dot_products.dim() == 2:
dot_products.data -= (1 - mask) * INF
else:
dot_products.data -= (1 - mask[:, None, :]) * INF
if value is None:
return dot_products
logits = dot_products / self.scale
if not self.noisy:
probs = softmax(logits)
else:
probs = gumbel_softmax(logits, beta=beta, tau=tau)
if feedback is not None:
feedback.append(probs.contiguous())
return matmul(self.dropout(probs), value)
class MultiHead2(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False,
diag=False, window=-1, noisy=False, use_wo=True):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal, diag=
diag, window=window, noisy=noisy)
self.wq = Linear(d_key, d_key, bias=use_wo)
self.wk = Linear(d_key, d_key, bias=use_wo)
self.wv = Linear(d_value, d_value, bias=use_wo)
if use_wo:
self.wo = Linear(d_value, d_key, bias=use_wo)
self.use_wo = use_wo
self.n_heads = n_heads
def forward(self, query, key, value, mask=None, feedback=None, weights=
None, beta=0, tau=1):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
B, Tq, D = query.size()
_, Tk, _ = key.size()
N = self.n_heads
probs = []
query, key, value = (x.contiguous().view(B, -1, N, D // N).
transpose(2, 1).contiguous().view(B * N, -1, D // N) for x in (
query, key, value))
if mask is not None:
mask = mask[:, None, :].expand(B, N, Tk).contiguous().view(B *
N, -1)
outputs = self.attention(query, key, value, mask, probs, beta, tau,
weights)
outputs = outputs.contiguous().view(B, N, -1, D // N).transpose(2, 1
).contiguous().view(B, -1, D)
if feedback is not None:
feedback.append(probs[0].view(B, N, Tq, Tk))
if self.use_wo:
return self.wo(outputs)
return outputs
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = Linear(d_model, d_hidden)
self.linear2 = Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
class EncoderLayerNew(nn.Module):
def __init__(self, args):
super().__init__()
self.selfattn = ResidualBlock(MultiHead2(args.d_model, args.d_model,
args.n_heads, args.drop_ratio, use_wo=args.use_wo), args.
d_model, args.drop_ratio)
self.feedforward = ResidualBlock(FeedForward(args.d_model, args.
d_hidden), args.d_model, args.drop_ratio)
def forward(self, input_0):
primals_2 = self.selfattn.layer.wq.weight
primals_3 = self.selfattn.layer.wq.bias
primals_4 = self.selfattn.layer.wk.weight
primals_5 = self.selfattn.layer.wk.bias
primals_6 = self.selfattn.layer.wv.weight
primals_7 = self.selfattn.layer.wv.bias
primals_8 = self.selfattn.layer.wo.weight
primals_9 = self.selfattn.layer.wo.bias
primals_10 = self.selfattn.layernorm.gamma
primals_11 = self.selfattn.layernorm.beta
primals_12 = self.feedforward.layer.linear1.weight
primals_13 = self.feedforward.layer.linear1.bias
primals_14 = self.feedforward.layer.linear2.weight
primals_15 = self.feedforward.layer.linear2.bias
primals_16 = self.feedforward.layernorm.gamma
primals_17 = self.feedforward.layernorm.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
| cclauss/nonauto-nmt | EncoderLayer | false | 15,022 | [
"BSD-3-Clause"
]
| 262 | efcbe4f2329b140ac3ce06abb6409457cebc8e49 | https://github.com/cclauss/nonauto-nmt/tree/efcbe4f2329b140ac3ce06abb6409457cebc8e49 |
BatchHardTripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vz/cvze3fljh6rhhrgdjpucp2fx34lyfnaz3gerrro5vqcfayj32bzf.py
# Topologically Sorted Source Nodes: [dist_1, clamp, dist_2], Original ATen: [aten.add, aten.clamp, aten.sqrt]
# Source node to ATen node mapping:
# clamp => clamp_min
# dist_1 => add
# dist_2 => sqrt
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %permute), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %add), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_tensor, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
triton_poi_fused_add_clamp_sqrt_0 = async_compile.triton('triton_poi_fused_add_clamp_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_sqrt_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp11 + tmp22
tmp24 = tmp0 + tmp23
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = libdevice.sqrt(tmp26)
tl.store(in_out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6u/c6u63eskd3q5ina6jehpdqt7hsvou6sxz6dgaq2myhxy5qfsdvwr.py
# Topologically Sorted Source Nodes: [mask, eq_1], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq_1 => eq_1
# mask => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Tensor](args = (%expand_1, %permute_2), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%eq, 1), kwargs = {})
triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 == tmp4
tl.store(out_ptr0 + (x1 + (4*y0)), tmp2, xmask & ymask)
tl.store(out_ptr1 + (x1 + (4*y0)), tmp5, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = buf0; del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [dist_1, clamp, dist_2], Original ATen: [aten.add, aten.clamp, aten.sqrt]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_sqrt_0.run(buf2, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mask, eq_1], Original ATen: [aten.eq]
triton_poi_fused_eq_1.run(arg1_1, buf3, buf4, 4, 4, grid=grid(4, 4), stream=stream0)
del arg1_1
return (buf2, buf4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BatchHardTripletLoss(nn.Module):
def __init__(self, margin=0):
super(BatchHardTripletLoss, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, inputs, targets):
batch_size = inputs.size(0)
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(batch_size,
batch_size)
dist = dist + dist.t()
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)  # keyword form of the deprecated addmm_(beta, alpha, mat1, mat2)
dist = dist.clamp(min=1e-12).sqrt()
mask = targets.expand(batch_size, batch_size).eq(targets.expand(
batch_size, batch_size).t())
dist_ap = dist[mask == 1]
dist_ap = dist_ap.view(batch_size, -1)
dist_an = dist[mask == 0]
dist_an = dist_an.view(batch_size, -1)
dist_ap, _ = torch.max(dist_ap, dim=1)
dist_an, _ = torch.min(dist_an, dim=1)
y = torch.ones(dist_an.size(), dtype=dist_an.dtype, device=dist_an.
device)
loss = self.ranking_loss(dist_an, dist_ap, y)
return loss
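# The pairwise distances in forward() use the identity
#   ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b,
# clamped at 1e-12 before the sqrt for numerical stability. Illustrative
# reference (not part of the original source):
#   d2 = inputs.pow(2).sum(1, keepdim=True)
#   dist_ref = (d2 + d2.t() - 2 * inputs @ inputs.t()).clamp(min=1e-12).sqrt()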
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
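# add_clamp_sqrt_0 adds the row/column squared-norm sums onto the result of
# the preceding mm, clamps at 1e-12, and takes the sqrt (the pairwise
# distance term of forward()). eq_1 builds the boolean same-label mask
# together with its (mask == 1) copy.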
@triton.jit
def triton_poi_fused_add_clamp_sqrt_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp11 + tmp22
tmp24 = tmp0 + tmp23
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = libdevice.sqrt(tmp26)
tl.store(in_out_ptr0 + x2, tmp27, xmask)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 == tmp4
tl.store(out_ptr0 + (x1 + 4 * y0), tmp2, xmask & ymask)
tl.store(out_ptr1 + (x1 + 4 * y0), tmp5, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_clamp_sqrt_0[grid(16)](buf2, arg0_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_eq_1[grid(4, 4)](arg1_1, buf3, buf4, 4, 4, XBLOCK=
4, YBLOCK=4, num_warps=1, num_stages=1)
del arg1_1
return buf2, buf4, buf3
class BatchHardTripletLossNew(nn.Module):
def __init__(self, margin=0):
super(BatchHardTripletLossNew, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| chenyanghungry/person-reid-lib | BatchHardTripletLoss | false | 15,023 | [
"MIT"
]
| 81 | 783e66c9bfedf582e2cf935b9f5be960b543ac3c | https://github.com/chenyanghungry/person-reid-lib/tree/783e66c9bfedf582e2cf935b9f5be960b543ac3c |
SelfAttentive | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yn/cyn75avd5lflbyn47bvz2yebt4cajzsnfmrfb4lylb7yai22gyi5.py
# Topologically Sorted Source Nodes: [hbar], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# hbar => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%mm,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py
# Topologically Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alphas_2 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alphas_2 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (200, 4), (4, 1))
assert_size_stride(primals_3, (1, 200), (200, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 200), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [hbar], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, 3200, grid=grid(3200), stream=stream0)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(primals_3, (200, 1), (1, 200), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0), primals_1, out=buf5)
return (buf5, reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0), primals_1, buf1, buf4, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 200), (200, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SelfAttentive(nn.Module):
def __init__(self, hidden_size, att_hops=1, att_unit=200, dropout=0.2):
super(SelfAttentive, self).__init__()
self.drop = nn.Dropout(dropout)
self.ws1 = nn.Linear(hidden_size, att_unit, bias=False)
self.ws2 = nn.Linear(att_unit, att_hops, bias=False)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax()
self.attention_hops = att_hops
def forward(self, rnn_out, mask=None):
outp = rnn_out
size = outp.size()
compressed_embeddings = outp.reshape(-1, size[2])
hbar = self.tanh(self.ws1(self.drop(compressed_embeddings)))
alphas = self.ws2(hbar).view(size[0], size[1], -1)
alphas = torch.transpose(alphas, 1, 2).contiguous()
if mask is not None:
mask = mask.squeeze(2)
concatenated_mask = [mask for i in range(self.attention_hops)]
concatenated_mask = torch.cat(concatenated_mask, 1)
penalized_alphas = alphas + concatenated_mask
else:
penalized_alphas = alphas
alphas = self.softmax(penalized_alphas.view(-1, size[1]))
alphas = alphas.view(size[0], self.attention_hops, size[1])
return torch.bmm(alphas, outp), alphas
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
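if __name__ == '__main__':
    # Usage sketch (added for illustration; not part of the original repo).
    # Shapes follow get_inputs()/get_init_inputs() above; .eval() disables
    # dropout so the attention weights are deterministic.
    model = SelfAttentive(hidden_size=4).eval()
    context, alphas = model(torch.rand(4, 4, 4))
    assert context.shape == (4, 1, 4)
    assert alphas.shape == (4, 1, 4)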
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (200, 4), (4, 1))
assert_size_stride(primals_3, (1, 200), (200, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 200), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(3200)](buf1, 3200, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_3, (200, 1), (1,
200), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0
), primals_1, out=buf5)
return buf5, reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0
), primals_1, buf1, buf4, primals_3
class SelfAttentiveNew(nn.Module):
def __init__(self, hidden_size, att_hops=1, att_unit=200, dropout=0.2):
super(SelfAttentiveNew, self).__init__()
self.drop = nn.Dropout(dropout)
self.ws1 = nn.Linear(hidden_size, att_unit, bias=False)
self.ws2 = nn.Linear(att_unit, att_hops, bias=False)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax()
self.attention_hops = att_hops
def forward(self, input_0):
primals_2 = self.ws1.weight
primals_3 = self.ws2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
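if __name__ == '__main__':
    # Usage sketch (added for illustration; assumes a CUDA device, since
    # call() pins all buffers to cuda:0). Output shapes mirror the eager
    # SelfAttentive module: attended context plus attention weights.
    model = SelfAttentiveNew(hidden_size=4).cuda()
    context, alphas = model(torch.rand(4, 4, 4, device='cuda'))
    assert context.shape == (4, 1, 4)
    assert alphas.shape == (4, 1, 4)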
| chenyangh/SemEval2019-Task3 | SelfAttentive | false | 15,024 | [
"MIT"
]
| 50 | c6204797b4b6cc08cb4d2d88108405f959d63ee9 | https://github.com/chenyangh/SemEval2019-Task3/tree/c6204797b4b6cc08cb4d2d88108405f959d63ee9 |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wh/cwhxpubmk5zws2dc5yu2mtk5t66jhcyf3sszeupz6cdsjmpn73hu.py
# Topologically Sorted Source Nodes: [mul, logits], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# logits => sum_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %view_3), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x1 = (xindex // 16) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2u/c2us6x3ktpcrct3uopvyui6pf6vesymer4fsjthamh4nxyvchsyq.py
# Topologically Sorted Source Nodes: [mul_1, sub, mul_2, mul_3, logits_1, scores], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax]
# Source node to ATen node mapping:
# logits_1 => add
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# scores => amax, exp, sub_1, sum_2
# sub => sub
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %primals_9), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_9), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %sub), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, -4294967296.0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_rsub_1 = async_compile.triton('triton_poi_fused__softmax_add_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp25 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp26 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = tmp0 * tmp4
tmp6 = -4294967296.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp11 = tmp9 * tmp10
tmp12 = tmp3 - tmp10
tmp13 = tmp9 * tmp12
tmp14 = tmp13 * tmp6
tmp15 = tmp11 + tmp14
tmp16 = triton_helpers.maximum(tmp8, tmp15)
tmp19 = tmp17 * tmp18
tmp20 = tmp3 - tmp18
tmp21 = tmp17 * tmp20
tmp22 = tmp21 * tmp6
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp16, tmp23)
tmp27 = tmp25 * tmp26
tmp28 = tmp3 - tmp26
tmp29 = tmp25 * tmp28
tmp30 = tmp29 * tmp6
tmp31 = tmp27 + tmp30
tmp32 = triton_helpers.maximum(tmp24, tmp31)
tmp33 = tmp8 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp15 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp23 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp31 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tl.store(out_ptr0 + (x2), tmp32, xmask)
tl.store(out_ptr1 + (x2), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/p7/cp7nxjumo4bvj76d7arisjs63wyrrdwtiwnutx5td6pqmq5unvip.py
# Topologically Sorted Source Nodes: [mul_4, output], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul_4 => mul_4
# output => sum_3
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %view_5), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x3), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr4 + (64 + x3), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr4 + (128 + x3), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr4 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = tmp0 * tmp4
tmp6 = -4294967296.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tmp15 = tmp13 * tmp14
tmp18 = tmp16 * tmp17
tmp19 = tmp3 - tmp17
tmp20 = tmp16 * tmp19
tmp21 = tmp20 * tmp6
tmp22 = tmp18 + tmp21
tmp23 = tmp22 - tmp9
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp24 / tmp12
tmp27 = tmp25 * tmp26
tmp28 = tmp15 + tmp27
tmp31 = tmp29 * tmp30
tmp32 = tmp3 - tmp30
tmp33 = tmp29 * tmp32
tmp34 = tmp33 * tmp6
tmp35 = tmp31 + tmp34
tmp36 = tmp35 - tmp9
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp37 / tmp12
tmp40 = tmp38 * tmp39
tmp41 = tmp28 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp3 - tmp43
tmp46 = tmp42 * tmp45
tmp47 = tmp46 * tmp6
tmp48 = tmp44 + tmp47
tmp49 = tmp48 - tmp9
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp12
tmp53 = tmp51 * tmp52
tmp54 = tmp41 + tmp53
tl.store(out_ptr0 + (x4), tmp54, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_7
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, logits], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(buf0, buf1, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, sub, mul_2, mul_3, logits_1, scores], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_rsub_1.run(buf3, primals_9, buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_4, output], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_2.run(buf3, primals_9, buf4, buf5, buf2, buf6, 256, grid=grid(256), stream=stream0)
del buf3
del buf4
del buf5
return (buf6, primals_9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn
class Attention(nn.Module):
def __init__(self, dim_i, dim_o):
"""
build the target-aware attention
input schema:
dim_i: the dimension of the input feature vector
dim_o: the dimension of the output feature vector
output schema:
        return an aggregated vector from the context (k, v) for the query q
"""
super(Attention, self).__init__()
self.Q = nn.Linear(dim_i, dim_o)
self.K = nn.Linear(dim_i, dim_o)
self.V = nn.Linear(dim_i, dim_o)
def forward(self, hist_seq_emb, hist_seq_mask, cand_emb):
q, k, v = self.Q(cand_emb), self.K(hist_seq_emb), self.V(hist_seq_emb)
logits = torch.sum(q.unsqueeze(1) * k, dim=2)
logits = logits * hist_seq_mask + logits * (1 - hist_seq_mask
) * -2 ** 32.0
scores = torch.softmax(logits, dim=1)
output = torch.sum(scores.unsqueeze(2) * v, dim=1)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_i': 4, 'dim_o': 4}]
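if __name__ == '__main__':
    # Usage sketch (added for illustration; not part of the original repo).
    # hist_seq_mask is a 0/1 tensor: masked positions get a -2**32 penalty
    # before the softmax, so an all-ones mask leaves the logits unchanged.
    model = Attention(dim_i=4, dim_o=4)
    hist_seq_emb = torch.rand(4, 4, 4, 4)
    hist_seq_mask = torch.ones(4, 4, 4, 4)
    cand_emb = torch.rand(4, 4, 4, 4)
    out = model(hist_seq_emb, hist_seq_mask, cand_emb)
    assert out.shape == (4, 4, 4, 4)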
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x3, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp25 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = tmp0 * tmp4
tmp6 = -4294967296.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp11 = tmp9 * tmp10
tmp12 = tmp3 - tmp10
tmp13 = tmp9 * tmp12
tmp14 = tmp13 * tmp6
tmp15 = tmp11 + tmp14
tmp16 = triton_helpers.maximum(tmp8, tmp15)
tmp19 = tmp17 * tmp18
tmp20 = tmp3 - tmp18
tmp21 = tmp17 * tmp20
tmp22 = tmp21 * tmp6
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp16, tmp23)
tmp27 = tmp25 * tmp26
tmp28 = tmp3 - tmp26
tmp29 = tmp25 * tmp28
tmp30 = tmp29 * tmp6
tmp31 = tmp27 + tmp30
tmp32 = triton_helpers.maximum(tmp24, tmp31)
tmp33 = tmp8 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp15 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp23 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp31 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tl.store(out_ptr0 + x2, tmp32, xmask)
tl.store(out_ptr1 + x2, tmp43, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr4 + (64 + x3), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr4 + (128 + x3), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp43 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp52 = tl.load(in_ptr4 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = tmp0 * tmp4
tmp6 = -4294967296.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tmp15 = tmp13 * tmp14
tmp18 = tmp16 * tmp17
tmp19 = tmp3 - tmp17
tmp20 = tmp16 * tmp19
tmp21 = tmp20 * tmp6
tmp22 = tmp18 + tmp21
tmp23 = tmp22 - tmp9
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp24 / tmp12
tmp27 = tmp25 * tmp26
tmp28 = tmp15 + tmp27
tmp31 = tmp29 * tmp30
tmp32 = tmp3 - tmp30
tmp33 = tmp29 * tmp32
tmp34 = tmp33 * tmp6
tmp35 = tmp31 + tmp34
tmp36 = tmp35 - tmp9
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp37 / tmp12
tmp40 = tmp38 * tmp39
tmp41 = tmp28 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp3 - tmp43
tmp46 = tmp42 * tmp45
tmp47 = tmp46 * tmp6
tmp48 = tmp44 + tmp47
tmp49 = tmp48 - tmp9
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp12
tmp53 = tmp51 * tmp52
tmp54 = tmp41 + tmp53
tl.store(out_ptr0 + x4, tmp54, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_7
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(256)](buf0, buf1, buf3, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__softmax_add_mul_rsub_1[grid(64)](buf3, primals_9,
buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(256)](buf3, primals_9, buf4, buf5,
buf2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del buf4
del buf5
return buf6, primals_9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, buf2
class AttentionNew(nn.Module):
def __init__(self, dim_i, dim_o):
"""
build the target-aware attention
input schema:
dim_i: the dimension of the input feature vector
dim_o: the dimension of the output feature vector
output schema:
        return an aggregated vector from the context (k, v) for the query q
"""
super(AttentionNew, self).__init__()
self.Q = nn.Linear(dim_i, dim_o)
self.K = nn.Linear(dim_i, dim_o)
self.V = nn.Linear(dim_i, dim_o)
def forward(self, input_0, input_1, input_2):
primals_1 = self.Q.weight
primals_2 = self.Q.bias
primals_4 = self.K.weight
primals_5 = self.K.bias
primals_7 = self.V.weight
primals_8 = self.V.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
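if __name__ == '__main__':
    # Usage sketch (added for illustration; assumes a CUDA device). Note the
    # argument order of the fused module: input_0 feeds Q (the candidate),
    # input_1 feeds K/V (the history), and input_2 is the history mask.
    model = AttentionNew(dim_i=4, dim_o=4).cuda()
    cand = torch.rand(4, 4, 4, 4, device='cuda')
    hist = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.ones(4, 4, 4, 4, device='cuda')
    assert model(cand, hist, mask).shape == (4, 4, 4, 4)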
| chencsgit/luoxi_models | Attention | false | 15,025 | [
"Apache-2.0"
]
| 58 | ea9e69dfb81b29f41ed92c75faacf81114c69a2f | https://github.com/chencsgit/luoxi_models/tree/ea9e69dfb81b29f41ed92c75faacf81114c69a2f |
PoseNormalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vc/cvc6dgar7eitn3n3sckla6kpxsymhplonsoe3mx3dxt3uutqddlf.py
# Topologically Sorted Source Nodes: [mul, sub], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul => mul
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
triton_poi_fused_mul_sub_0 = async_compile.triton('triton_poi_fused_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 - tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub], Original ATen: [aten.mul, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PoseNormalize(nn.Module):
@torch.no_grad()
def forward(self, x):
return x * 2 - 1
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
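if __name__ == '__main__':
    # Quick check (added for illustration): the module rescales inputs from
    # [0, 1] to [-1, 1], so 0 -> -1, 0.5 -> 0 and 1 -> 1.
    norm = PoseNormalize()
    x = torch.tensor([0.0, 0.5, 1.0])
    assert torch.allclose(norm(x), torch.tensor([-1.0, 0.0, 1.0]))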
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 - tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PoseNormalizeNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
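if __name__ == '__main__':
    # Quick check (added for illustration; assumes a CUDA device): the fused
    # kernel computes the same x * 2 - 1 mapping as the eager module.
    m = PoseNormalizeNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), x * 2 - 1)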
| chinitaberrio/DeepPrivacy | PoseNormalize | false | 15,026 | [
"MIT"
]
| 1,128 | d50e1b5ae762b47ab5a8f54cb90e66465057bd25 | https://github.com/chinitaberrio/DeepPrivacy/tree/d50e1b5ae762b47ab5a8f54cb90e66465057bd25 |
InvertibleDownsampling3D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3f/c3f5tij5hlzvbpsxnw7aoplqe4benzzxgbti6r7oingysdpjz4pa.py
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
# Source node to ATen node mapping:
# autograd_function_apply => add, eq, full_default, full_default_1, iota, sub_1, where
# skew_symmetric_matrix => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %permute), kwargs = {})
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %sub), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %sub), kwargs = {})
triton_poi_fused_add_eye_sub_0 = async_compile.triton('triton_poi_fused_add_eye_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 8], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 8
x2 = xindex
y3 = yindex
y1 = (yindex // 8)
tmp6 = tl.load(in_ptr0 + (x2 + (8*y3)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (y0 + (8*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp0 = y0
tmp1 = x2
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x2 + (8*y3)), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x2 + (8*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xj/cxjoyt2oflrdhq4nv3yeaoknz7dctly7mqs2ohby27rllje5nydp.py
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# reshape => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%linalg_lu_solve,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 8], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 8
y1 = (yindex // 8)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (8*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (8*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 8, 8), (64, 8, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
buf5 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_sub_0.run(primals_1, buf0, buf5, 32, 8, grid=grid(32, 8), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add, aten.linalg_lu_factor_ex]
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.linalg_lu_solve]
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf7, buf8, 32, 8, grid=grid(32, 8), stream=stream0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf8, (32, 1, 2, 2, 2), (8, 0, 4, 2, 1), 0), stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=4, bias=None)
assert_size_stride(buf9, (1, 32, 2, 2, 2), (256, 8, 4, 2, 1))
# Topologically Sorted Source Nodes: [], Original ATen: [aten.linalg_lu_solve]
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
del buf7
buf11 = buf10
del buf10
return (reinterpret_tensor(buf9, (32, 2, 2, 2), (8, 4, 2, 1), 0), buf5, reinterpret_tensor(buf8, (32, 1, 2, 2, 2), (8, 8, 4, 2, 1), 0), reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 8, 8), (64, 8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_3_t
from torch.nn.modules.utils import _triple
import torch.nn.functional as F
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
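# Sanity-check sketch: for a skew-symmetric A, the Cayley transform
# (I + A)^{-1} (I - A) computed above is orthogonal, e.g.:
#   A = torch.randn(5, 5); A = A - A.T
#   Q = _cayley(A)
#   assert torch.allclose(Q.T @ Q, torch.eye(5), atol=1e-5)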
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
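# Sketch: this equals the induced 1-norm (maximum absolute column sum), so it
# should agree with torch.linalg.matrix_norm where that API is available:
#   A = torch.randn(4, 3, 3)
#   assert torch.allclose(matrix_1_norm(A), torch.linalg.matrix_norm(A, ord=1))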
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For a matrix M, the output has the same shape as M if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
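# Usage sketch:
#   M = torch.randn(4, 3, 3)
#   I = _eye_like(M)  # shape (4, 3, 3): a batch of four 3x3 identity matrices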
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
        U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
    This works for both the forward computation and the derivative of
    the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
                temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
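# Sketch: with s = 1 for every matrix, _square performs exactly one squaring:
#   R = torch.randn(1, 3, 3)
#   assert torch.allclose(_square(torch.ones(1), R), R @ R)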
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
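# Sketch (cross-check against the built-in, assuming torch.matrix_exp is
# available, i.e. PyTorch >= 1.7):
#   A = torch.randn(3, 3)
#   assert torch.allclose(_expm_scaling_squaring(A), torch.matrix_exp(A),
#                         atol=1e-5)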
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For a matrix M, the output has the same shape as M if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
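# Sketch: for a unit vector v, H = I - 2 v v^T is symmetric and orthogonal,
# hence involutory (H @ H = I):
#   v = torch.randn(5); v = v / v.norm()
#   H = householder_matrix(v.view(-1, 1))
#   assert torch.allclose(H @ H, torch.eye(5), atol=1e-6)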
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
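# Sketch: the product of Householder matrices is orthogonal:
#   M = torch.randn(5, 5)
#   Q = householder_transform(M)
#   assert torch.allclose(Q.T @ Q, torch.eye(5), atol=1e-5)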
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
For some chosen method of parametrizing orthogonal matrices, this
function outputs the required weights necessary to represent a
    chosen initialization as a PyTorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
Should be 'exp' or 'cayley'
init : The matrix which should be represented. Should be
        'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
        possible if every entry of ``stride`` is 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
if init == 'haar' and set(stride) != {2}:
        warn("Initialization 'haar' is only available for stride 2. Falling back to 'squeeze'.")
init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
weight = np.array([[[0, p, p, 0, p, 0, 0, 0], [0, 0, 0, p,
0, p, 0, 0], [0, 0, 0, p, 0, 0, p, 0], [0, 0, 0, 0, 0,
0, 0, p], [0, 0, 0, 0, 0, p, p, 0], [0, 0, 0, 0, 0, 0,
0, p], [0, 0, 0, 0, 0, 0, 0, p], [0, 0, 0, 0, 0, 0, 0,
0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p], [0, 0,
0, -p, 0, -p, p - 1, 0], [0, 0, 0, -p, 0, p - 1, -p, 0],
[0, 0, 0, 0, 1 - p, 0, 0, -p], [0, 0, 0, 0, 0, -p, -p,
0], [0, 0, 0, 0, 0, 0, 0, -p], [0, 0, 0, 0, 0, 0, 0, -p
], [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
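# Usage sketch: Haar initialization for 3D data under the Cayley method gives
# one 8x8 weight matrix per low channel:
#   w = __initialize_weight__((4, 8, 8), stride=(2, 2, 2), method='cayley',
#                             init='haar')
#   assert w.shape == (4, 8, 8)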
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
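# Usage sketch: cayley.apply turns a (skew-symmetrized) weight into an
# orthogonal matrix while remaining differentiable:
#   W = torch.randn(4, 8, 8, requires_grad=True)
#   Q = cayley.apply(W - W.transpose(-1, -2))
#   Q.sum().backward()  # gradients flow back to W via the custom backward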
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
The channel multiplier, i.e. the number
        by which the number of channels is multiplied (downsampling)
or divided (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
class InvertibleDownsampling3D(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_3_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_triple(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels * channel_multiplier
super(InvertibleDownsampling3D, self).__init__(*args,
low_channel_number=self.in_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def forward(self, x):
return F.conv3d(x, self.kernel, stride=self.stride, groups=self.
low_channel_number)
def inverse(self, x):
return F.conv_transpose3d(x, self.kernel, stride=self.stride,
groups=self.low_channel_number)
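# Round-trip sketch: the inverse reconstructs the input up to numerical error,
# because the grouped-conv kernel is built from orthogonal matrices:
#   layer = InvertibleDownsampling3D(in_channels=4)
#   x = torch.randn(2, 4, 8, 8, 8)
#   y = layer(x)  # shape (2, 32, 4, 4, 4)
#   assert torch.allclose(layer.inverse(y), x, atol=1e-5)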
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_3_t
from torch.nn.modules.utils import _triple
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 32
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 8
x2 = xindex
y3 = yindex
y1 = yindex // 8
tmp6 = tl.load(in_ptr0 + (x2 + 8 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (y0 + 8 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp0 = y0
tmp1 = x2
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x2 + 8 * y3), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x2 + 8 * y3), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 32
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 8
y1 = yindex // 8
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 8 * y3), tmp0, xmask & ymask)
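# triton_poi_fused_clone_1 transposes the trailing two dims of each 8x8 matrix
# into a contiguous layout before the result is viewed as conv weights.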
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 8, 8), (64, 8, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
buf5 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_sub_0[grid(32, 8)](primals_1, buf0, buf5,
32, 8, XBLOCK=8, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = buf0
del buf0
triton_poi_fused_clone_1[grid(32, 8)](buf7, buf8, 32, 8, XBLOCK=8,
YBLOCK=32, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf8,
(32, 1, 2, 2, 2), (8, 0, 4, 2, 1), 0), stride=(2, 2, 2),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=4, bias=None)
assert_size_stride(buf9, (1, 32, 2, 2, 2), (256, 8, 4, 2, 1))
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
del buf7
buf11 = buf10
del buf10
return reinterpret_tensor(buf9, (32, 2, 2, 2), (8, 4, 2, 1), 0
), buf5, reinterpret_tensor(buf8, (32, 1, 2, 2, 2), (8, 8, 4, 2, 1), 0
), reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4,
1), 0), buf11
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For a matrix M, the output has the same shape as M if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
        U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
                temp_zeros = torch.clone(L).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + temp_zeros @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
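# Sketch: for small-norm inputs the diagonal Padé approximant alone is already
# close to the true exponential:
#   A = 0.1 * torch.randn(3, 3)
#   assert torch.allclose(_expm_pade(A), torch.matrix_exp(A), atol=1e-6)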
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For a matrix M, the output has the same shape as M if M is an (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
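# Sketch: normalization runs along dim=-2, so each column ends up with
# (approximately) unit Euclidean norm:
#   M = torch.randn(5, 3)
#   N = normalize_matrix_rows(M)
#   assert torch.allclose(N.norm(dim=-2), torch.ones(3), atol=1e-3)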
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
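# Sketch: fewer reflections give a cheaper (but less expressive) factor that
# is still orthogonal:
#   M = torch.randn(6, 6)
#   Q = householder_transform(M, n_reflections=3)
#   assert torch.allclose(Q.T @ Q, torch.eye(6), atol=1e-5)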
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
For some chosen method of parametrizing orthogonal matrices, this
function outputs the required weights necessary to represent a
    chosen initialization as a PyTorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
Should be 'exp' or 'cayley'
init : The matrix which should be represented. Should be
        'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
        possible if every entry of ``stride`` is 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
if init == 'haar' and set(stride) != {2}:
        warn("Initialization 'haar' is only available for stride 2. Falling back to 'squeeze'.")
init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
weight = np.array([[[0, p, p, 0, p, 0, 0, 0], [0, 0, 0, p,
0, p, 0, 0], [0, 0, 0, p, 0, 0, p, 0], [0, 0, 0, 0, 0,
0, 0, p], [0, 0, 0, 0, 0, p, p, 0], [0, 0, 0, 0, 0, 0,
0, p], [0, 0, 0, 0, 0, 0, 0, p], [0, 0, 0, 0, 0, 0, 0,
0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p], [0, 0,
0, -p, 0, -p, p - 1, 0], [0, 0, 0, -p, 0, p - 1, -p, 0],
[0, 0, 0, 0, 1 - p, 0, 0, -p], [0, 0, 0, 0, 0, -p, -p,
0], [0, 0, 0, 0, 0, 0, 0, -p], [0, 0, 0, 0, 0, 0, 0, -p
], [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
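# Usage sketch: Haar initialization for 2D data under the 'exp' method:
#   w = __initialize_weight__((1, 4, 4), stride=(2, 2), method='exp',
#                             init='haar')
#   assert w.shape == (1, 4, 4)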
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
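# Usage sketch: the exponential of a skew-symmetric matrix is orthogonal:
#   W = torch.randn(8, 8)
#   Q = expm.apply(W - W.T)
#   assert torch.allclose(Q.T @ Q, torch.eye(8), atol=1e-5)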
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
The channel multiplier, i.e. the number
        by which the number of channels is multiplied (downsampling)
or divided (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
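# Note: the kernel property reshapes each channel_multiplier x
# channel_multiplier orthogonal matrix into one grouped-convolution filter of
# shape (high_channel_number, 1, *stride).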
class InvertibleDownsampling3DNew(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_3_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_triple(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels * channel_multiplier
super(InvertibleDownsampling3DNew, self).__init__(*args,
low_channel_number=self.in_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def inverse(self, x):
return F.conv_transpose3d(x, self.kernel, stride=self.stride,
groups=self.low_channel_number)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| cetmann/iunets | InvertibleDownsampling3D | false | 15,027 | [
"MIT"
]
| 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
ToyNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ss/cssuid2za6l3t6z6jhvuqgy63frxdtddkskc2r2k67yzve3ef7oh.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 18
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
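# triton_poi_fused_0 repacks the (6, 3, 5, 5) conv weight so that the input
# channel becomes the fastest-varying dimension (a channels-last-style layout
# for coalesced reads during the convolution).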
# kernel path: runs/run_shard_0/inductor_cache/ii/ciifw6ejievyknbbyu53i2s3l7kruokernfp2lliv26itbvfkszy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 65536], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 46656
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (46656*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (139968*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fh/cfhzti7vza4rqsbnjlbhnd7rs5cbzzlbferwcf4fensw4dwwokcd.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 6
y1 = (yindex // 6)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (6*x2) + (150*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/m3/cm3edyyxf7oa4mnzdspswulo3so3gqxy52w3el5wkvgz6chncdf7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (16*x2) + (144*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/i5/ci5m7nhckffd3gacyfsu5iumr7vpbe4uy62eh3okvpyt7343vo33.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fy/cfyrmkinq2sfin6x4qvoa3n2dwnh3oz64cste7zd34frcv3ck5bu.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1078656
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
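# Note: the convolution itself is dispatched through extern_kernels.convolution
# below; the kernel above is only the fused epilogue. It adds the per-channel
# bias (x0 = xindex % 6 selects one of conv1's 6 output channels, which sit at
# stride 1 in the channels-last buffer) and clamps at zero, in place.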
# kernel path: runs/run_shard_0/inductor_cache/og/cogjyrfqbebat52nswtr7k3imreoev5ppts7qzr4tjx2akkwsvia.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 269664
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 106
x2 = (xindex // 636)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (12*x1) + (2544*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (6 + x0 + (12*x1) + (2544*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (1272 + x0 + (12*x1) + (2544*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (1278 + x0 + (12*x1) + (2544*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
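# Note: the kernel above is the 2x2 / stride-2 max pool written out by hand.
# The four loads are the window corners (offsets 0, +6, +1272, +1278 in the
# channels-last activation: top-left, top-right, bottom-left, bottom-right);
# tmp6 is the pooled value and tmp16 the int8 argmax in {0, 1, 2, 3}, where a
# later corner wins only if it is strictly greater than the running maximum.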
# kernel path: runs/run_shard_0/inductor_cache/ah/cahno2hlhsvn7q2dofufqoiuhkwicle37c2irmx7davtfipelwcc.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 665856
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2w/c2wcahebktiisch4znkhzwmvmb6wgibzocsqc6fgjyn7pcbgqzkx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 166464
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16) % 51
x2 = (xindex // 816)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (32*x1) + (3264*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (32*x1) + (3264*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (1632 + x0 + (32*x1) + (3264*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (1648 + x0 + (32*x1) + (3264*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tg/ctghs6yx5evywfbq7diehxmourw36mizsnciwds57nrglc4sbceu.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 614656
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4s/c4s5nqjwv5yras4ui7rqrjlxllxb2ytul5vp3ydguudhdhdjoq2f.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64) % 24
x2 = (xindex // 1536) % 24
x3 = (xindex // 36864)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (6272*x2) + (153664*x3)), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (6272*x2) + (153664*x3)), None)
tmp3 = tl.load(in_ptr0 + (3136 + x0 + (128*x1) + (6272*x2) + (153664*x3)), None)
tmp5 = tl.load(in_ptr0 + (3200 + x0 + (128*x1) + (6272*x2) + (153664*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4), tmp6, None)
tl.store(out_ptr1 + (x4), tmp16, None)
''', device_str='cuda')
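# Note: xmask is constant True here because xnumel = 147456 (4*64*24*24)
# divides evenly by the candidate XBLOCK sizes, so every lane is provably in
# bounds and the usual bounds check is elided.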
# kernel path: runs/run_shard_0/inductor_cache/ss/cssvgsnxsafik5ogualyatxnba3zvgoslmwfygfebb5vj3j55iyd.py
# Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# relu_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 495616
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6o/c6oimbskbisgv44vip2hg2iaqnurzpoafhvisxwdrwbbcwel2jre.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 484
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 11
y1 = (yindex // 11)
y5 = yindex
y4 = (yindex // 121)
y6 = yindex % 121
tmp0 = tl.load(in_ptr0 + (x2 + (512*y0) + (11264*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x2 + (512*y0) + (11264*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (5632 + x2 + (512*y0) + (11264*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (5888 + x2 + (512*y0) + (11264*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (256*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (121*x2) + (30976*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
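# Note: besides pooling, the kernel above changes layout on the fly: the int8
# indices are stored channels-last (x2 + 256*y5), while the pooled values are
# written to a contiguous NCHW buffer (y6 + 121*x2 + 30976*y4) so the flatten
# to a (4, 30976) matrix for fc0 is a zero-copy reinterpret_tensor.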
# kernel path: runs/run_shard_0/inductor_cache/hk/chkw5jgzb7mkhgtfyhffxvauygekn7r724ucig55fevwupi56ca7.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_13 = async_compile.triton('triton_poi_fused_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
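# Note: the 30976 -> 2048 matmul for fc0 is issued via extern_kernels.mm; the
# kernel above is its epilogue, adding the bias (x0 = xindex % 2048) and
# applying ReLU in place. In rough eager terms (illustrative sketch only):
#   x = F.relu(x @ w0.t() + b0)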
# kernel path: runs/run_shard_0/inductor_cache/lw/clwuiyzq3ao7k7v64jwojjly4qy5ejem2v6mqu6m2sf5pz4gwuz6.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_6 => relu_5
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_13), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_14 = async_compile.triton('triton_poi_fused_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pi/cpivfqf2nlgqrfodre56wl3f2gvkx7fdgjiee3vpxxs4luvjkldm.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_7 => relu_6
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_15 = async_compile.triton('triton_poi_fused_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 216, 216), (139968, 46656, 216, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (2048, 30976), (30976, 1))
assert_size_stride(primals_11, (2048, ), (1, ))
assert_size_stride(primals_12, (512, 2048), (2048, 1))
assert_size_stride(primals_13, (512, ), (1, ))
assert_size_stride(primals_14, (128, 512), (512, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (2, 128), (128, 1))
assert_size_stride(primals_17, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((6, 3, 5, 5), (75, 1, 15, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 18, 25, grid=grid(18, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 216, 216), (139968, 1, 648, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 46656, grid=grid(12, 46656), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 96, 25, grid=grid(96, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 16, 3, 3), (144, 1, 48, 16), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 1024, 9, grid=grid(1024, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_8
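        # buf0-buf4 now hold one-time channels-last repacks of the conv
        # weights and the input batch; the contiguous originals are freed.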
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 6, 212, 212), (269664, 1, 1272, 6))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 1078656, grid=grid(1078656), stream=stream0)
del primals_2
buf7 = empty_strided_cuda((4, 6, 106, 106), (67416, 1, 636, 6), torch.float32)
buf8 = empty_strided_cuda((4, 6, 106, 106), (67416, 1, 636, 6), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_6.run(buf6, buf7, buf8, 269664, grid=grid(269664), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 16, 102, 102), (166464, 1, 1632, 16))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 665856, grid=grid(665856), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 16, 51, 51), (41616, 1, 816, 16), torch.float32)
buf12 = empty_strided_cuda((4, 16, 51, 51), (41616, 1, 816, 16), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 166464, grid=grid(166464), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 49, 49), (153664, 1, 3136, 64))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf14, primals_7, 614656, grid=grid(614656), stream=stream0)
del primals_7
buf15 = empty_strided_cuda((4, 64, 24, 24), (36864, 1, 1536, 64), torch.float32)
buf16 = empty_strided_cuda((4, 64, 24, 24), (36864, 1, 1536, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf14, buf15, buf16, 147456, grid=grid(147456), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 22, 22), (123904, 1, 5632, 256))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf18, primals_9, 495616, grid=grid(495616), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((4, 256, 11, 11), (30976, 1, 2816, 256), torch.int8)
buf20 = empty_strided_cuda((4, 256, 11, 11), (30976, 121, 11, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf18, buf19, buf20, 484, 256, grid=grid(484, 256), stream=stream0)
buf21 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (4, 30976), (30976, 1), 0), reinterpret_tensor(primals_10, (30976, 2048), (1, 30976), 0), out=buf21)
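        # reinterpret_tensor yields a zero-copy transposed view of fc0's
        # weight (strides (1, 30976)), so this mm computes x @ W0^T without
        # materializing the transpose; bias and ReLU follow in
        # triton_poi_fused_relu_13.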
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_13.run(buf22, primals_11, 8192, grid=grid(8192), stream=stream0)
del primals_11
buf23 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf22, reinterpret_tensor(primals_12, (2048, 512), (1, 2048), 0), out=buf23)
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
triton_poi_fused_relu_14.run(buf24, primals_13, 2048, grid=grid(2048), stream=stream0)
del primals_13
buf25 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf24, reinterpret_tensor(primals_14, (512, 128), (1, 512), 0), out=buf25)
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
triton_poi_fused_relu_15.run(buf26, primals_15, 512, grid=grid(512), stream=stream0)
del primals_15
buf27 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, buf26, reinterpret_tensor(primals_16, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf27)
del primals_17
return (buf27, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10, buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor(buf20, (4, 30976), (30976, 1), 0), buf22, buf24, buf26, primals_16, primals_14, primals_12, primals_10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 216, 216), (139968, 46656, 216, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2048, 30976), (30976, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((512, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((2, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ToyNet(nn.Module):
def __init__(self):
super(ToyNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.conv3 = nn.Conv2d(16, 64, 3)
self.conv4 = nn.Conv2d(64, 256, 3)
self.fc0 = nn.Linear(256 * 11 * 11, 2048)
self.fc1 = nn.Linear(2048, 512)
self.fc2 = nn.Linear(512, 128)
self.fc3 = nn.Linear(128, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv4(x)))
x = x.view(-1, 256 * 11 * 11)
x = F.relu(self.fc0(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
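    # Shape arithmetic behind view(-1, 256 * 11 * 11) for a 216x216 input:
    # conv1 (5x5): 216 -> 212, pool -> 106; conv2 (5x5): -> 102, pool -> 51;
    # conv3 (3x3): -> 49, pool -> 24; conv4 (3x3): -> 22, pool -> 11; hence
    # 256 * 11 * 11 = 30976 features per sample.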
def get_inputs():
return [torch.rand([4, 3, 216, 216])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 18
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 46656
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 46656 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 139968 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 6
y1 = yindex // 6
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 6 * x2 + 150 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
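# triton_poi_fused_0 through triton_poi_fused_4 above are pure layout
# transforms: each permutes a conv weight (or, in triton_poi_fused_1, the
# input batch) from contiguous OIHW/NCHW into channels-last strides before
# any compute runs.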
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1078656
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 269664
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 106
x2 = xindex // 636
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1 + 2544 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (6 + x0 + 12 * x1 + 2544 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1272 + x0 + 12 * x1 + 2544 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (1278 + x0 + 12 * x1 + 2544 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 665856
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 166464
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 51
x2 = xindex // 816
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1 + 3264 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1 + 3264 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1632 + x0 + 32 * x1 + 3264 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (1648 + x0 + 32 * x1 + 3264 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 614656
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 24
x2 = xindex // 1536 % 24
x3 = xindex // 36864
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 6272 * x2 + 153664 * x3), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 6272 * x2 + 153664 * x3),
None)
tmp3 = tl.load(in_ptr0 + (3136 + x0 + 128 * x1 + 6272 * x2 + 153664 *
x3), None)
tmp5 = tl.load(in_ptr0 + (3200 + x0 + 128 * x1 + 6272 * x2 + 153664 *
x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x4, tmp6, None)
tl.store(out_ptr1 + x4, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 484
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 11
y1 = yindex // 11
y5 = yindex
y4 = yindex // 121
y6 = yindex % 121
tmp0 = tl.load(in_ptr0 + (x2 + 512 * y0 + 11264 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x2 + 512 * y0 + 11264 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (5632 + x2 + 512 * y0 + 11264 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (5888 + x2 + 512 * y0 + 11264 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 256 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 121 * x2 + 30976 * y4), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
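# Note: triton_poi_fused_relu_13/14/15 are the same bias-plus-ReLU epilogue at
# feature widths 2048, 512 and 128; only the last keeps a bounds mask, since
# its 512-element extent can be smaller than the autotuned XBLOCK.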
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 216, 216), (139968, 46656, 216, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (2048, 30976), (30976, 1))
assert_size_stride(primals_11, (2048,), (1,))
assert_size_stride(primals_12, (512, 2048), (2048, 1))
assert_size_stride(primals_13, (512,), (1,))
assert_size_stride(primals_14, (128, 512), (512, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (2, 128), (128, 1))
assert_size_stride(primals_17, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((6, 3, 5, 5), (75, 1, 15, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(18, 25)](primals_1, buf0, 18, 25, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 216, 216), (139968, 1, 648, 3),
torch.float32)
triton_poi_fused_1[grid(12, 46656)](primals_3, buf1, 12, 46656,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
        buf2 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32)
triton_poi_fused_2[grid(96, 25)](primals_4, buf2, 96, 25, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
        buf3 = empty_strided_cuda((64, 16, 3, 3), (144, 1, 48, 16), torch.float32)
        triton_poi_fused_3[grid(1024, 9)](primals_6, buf3, 1024, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
        buf4 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
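        # buf0-buf4 are the one-time channels-last repacks of the conv weights
        # and the input batch, built before the conv chain runs.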
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 6, 212, 212), (269664, 1, 1272, 6))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(1078656)](buf6, primals_2,
1078656, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf7 = empty_strided_cuda((4, 6, 106, 106), (67416, 1, 636, 6),
torch.float32)
buf8 = empty_strided_cuda((4, 6, 106, 106), (67416, 1, 636, 6),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(269664)](buf6, buf7,
buf8, 269664, XBLOCK=512, num_warps=8, num_stages=1)
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 16, 102, 102), (166464, 1, 1632, 16))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(665856)](buf10, primals_5,
665856, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 16, 51, 51), (41616, 1, 816, 16),
torch.float32)
buf12 = empty_strided_cuda((4, 16, 51, 51), (41616, 1, 816, 16),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(166464)](buf10,
buf11, buf12, 166464, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 49, 49), (153664, 1, 3136, 64))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_9[grid(614656)](buf14, primals_7,
614656, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((4, 64, 24, 24), (36864, 1, 1536, 64),
torch.float32)
buf16 = empty_strided_cuda((4, 64, 24, 24), (36864, 1, 1536, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(147456)](buf14,
buf15, buf16, 147456, XBLOCK=1024, num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 22, 22), (123904, 1, 5632, 256))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_11[grid(495616)](buf18, primals_9,
495616, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 256, 11, 11), (30976, 1, 2816, 256),
torch.int8)
buf20 = empty_strided_cuda((4, 256, 11, 11), (30976, 121, 11, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(484, 256)](buf18,
buf19, buf20, 484, 256, XBLOCK=256, YBLOCK=4, num_warps=4,
num_stages=1)
buf21 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf20, (4, 30976), (30976, 1), 0),
            reinterpret_tensor(primals_10, (30976, 2048), (1, 30976), 0),
            out=buf21)
buf22 = buf21
del buf21
triton_poi_fused_relu_13[grid(8192)](buf22, primals_11, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf23 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf22, reinterpret_tensor(primals_12, (2048, 512),
(1, 2048), 0), out=buf23)
buf24 = buf23
del buf23
triton_poi_fused_relu_14[grid(2048)](buf24, primals_13, 2048,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf25 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf24, reinterpret_tensor(primals_14, (512, 128),
(1, 512), 0), out=buf25)
buf26 = buf25
del buf25
triton_poi_fused_relu_15[grid(512)](buf26, primals_15, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_15
buf27 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_17, buf26, reinterpret_tensor(
primals_16, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf27)
del primals_17
return (buf27, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor
(buf20, (4, 30976), (30976, 1), 0), buf22, buf24, buf26, primals_16,
primals_14, primals_12, primals_10)
class ToyNetNew(nn.Module):
def __init__(self):
super(ToyNetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.conv3 = nn.Conv2d(16, 64, 3)
self.conv4 = nn.Conv2d(64, 256, 3)
self.fc0 = nn.Linear(256 * 11 * 11, 2048)
self.fc1 = nn.Linear(2048, 512)
self.fc2 = nn.Linear(512, 128)
self.fc3 = nn.Linear(128, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc0.weight
primals_11 = self.fc0.bias
primals_12 = self.fc1.weight
primals_13 = self.fc1.bias
primals_14 = self.fc2.weight
primals_15 = self.fc2.bias
primals_16 = self.fc3.weight
primals_17 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
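# Hedged smoke test (not part of the original module). The 216x216 input size
# is inferred from the shape asserts in call() above (the 5x5 conv1 maps
# 216 -> 212); a CUDA device is assumed, since call() runs compiled kernels.
if __name__ == "__main__":
    model = ToyNetNew().cuda()
    x = torch.rand(4, 3, 216, 216, device="cuda")
    logits = model(x)
    assert logits.shape == (4, 2)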
| asalmanp/MIVisionX | ToyNet | false | 15,028 | ["MIT"] | 153 | a964774944331827c8d6e9bb1ffbb2578f335056 | https://github.com/asalmanp/MIVisionX/tree/a964774944331827c8d6e9bb1ffbb2578f335056 |
MS_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oo/cook4mll2miugq4l2i7s5uxydy2ao2dpspoakpbbfrvvrrmovgoc.py
# Topologically Sorted Source Nodes: [add, out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# out => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %convolution_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %convolution_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_2, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, out], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf3, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
return (buf3, primals_1, primals_2, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.multiprocessing
class MS_Block(nn.Module):
def __init__(self, input_feature, out_feature, d=[1, 2, 4], group=1):
super(MS_Block, self).__init__()
self.l1 = nn.Conv2d(input_feature, out_feature, 3, padding=d[0],
dilation=d[0], bias=False, groups=group)
self.l2 = nn.Conv2d(input_feature, out_feature, 3, padding=d[1],
dilation=d[1], bias=False, groups=group)
self.l3 = nn.Conv2d(input_feature, out_feature, 3, padding=d[2],
dilation=d[2], bias=False, groups=group)
def forward(self, x):
out = self.l1(x) + self.l2(x) + self.l3(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_feature': 4, 'out_feature': 4}]
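# Minimal usage sketch (not part of the original file): each branch pairs a
# 3x3 kernel with padding == dilation, so every branch preserves the spatial
# size and the three outputs can be summed elementwise.
if __name__ == "__main__":
    block = MS_Block(input_feature=4, out_feature=4)
    x = torch.rand(4, 4, 4, 4)
    assert block(x).shape == x.shape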
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x0, tmp4, xmask)
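# Hedged standalone launch sketch (not part of the original file; assumes a
# CUDA device). The kernel fuses the two elementwise adds and accumulates in
# place into its first argument, mirroring the launch inside call() below.
if __name__ == "__main__":
    a = torch.randn(256, device="cuda")
    b = torch.randn(256, device="cuda")
    c = torch.randn(256, device="cuda")
    expected = a + b + c
    triton_poi_fused_add_0[grid(256)](a, b, c, 256, XBLOCK=256)
    assert torch.allclose(a, expected)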
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1,
1), padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_2, primals_4, stride=(1,
1), padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf3, buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
del buf2
return buf3, primals_1, primals_2, primals_3, primals_4
class MS_BlockNew(nn.Module):
def __init__(self, input_feature, out_feature, d=[1, 2, 4], group=1):
super(MS_BlockNew, self).__init__()
self.l1 = nn.Conv2d(input_feature, out_feature, 3, padding=d[0],
dilation=d[0], bias=False, groups=group)
self.l2 = nn.Conv2d(input_feature, out_feature, 3, padding=d[1],
dilation=d[1], bias=False, groups=group)
self.l3 = nn.Conv2d(input_feature, out_feature, 3, padding=d[2],
dilation=d[2], bias=False, groups=group)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_3 = self.l2.weight
primals_4 = self.l3.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
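# Hedged sanity check (not part of the original file; assumes a CUDA device):
# the compiled call() should match the eager sum of the three dilated convs.
if __name__ == "__main__":
    block = MS_BlockNew(input_feature=4, out_feature=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    ref = block.l1(x) + block.l2(x) + block.l3(x)
    assert torch.allclose(block(x), ref, atol=1e-5)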
| chiukin/RANet | MS_Block | false | 15,029 | ["Apache-2.0"] | 267 | 681a47d9b1f114653290678f02f2d3ecdf4010bc | https://github.com/chiukin/RANet/tree/681a47d9b1f114653290678f02f2d3ecdf4010bc |
InvertibleUpsampling2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/am/camznkwec2lbh6msqczu6byzeziacywdz5jsefl4uifgbyfrwx5b.py
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
# Source node to ATen node mapping:
# autograd_function_apply => add, eq, full_default, full_default_1, iota, sub_1, where
# skew_symmetric_matrix => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %permute), kwargs = {})
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %sub), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %sub), kwargs = {})
triton_poi_fused_add_eye_sub_0 = async_compile.triton('triton_poi_fused_add_eye_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + (4*y0)), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + (4*y0)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fv/cfvl2yytwjnvovj3pb564j4e36ftzegt465t5hi3f4s6v27gm42g.py
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_2, %view, None, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_sub_0.run(primals_1, buf0, buf5, 4, 4, grid=grid(4, 4), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add, aten.linalg_lu_factor_ex]
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.linalg_lu_solve]
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 1, 2, 2), (4, 4, 2, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(primals_2, buf8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 8, 8), (64, 64, 8, 1))
del buf8
# Topologically Sorted Source Nodes: [], Original ATen: [aten.linalg_lu_solve]
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return (buf9, primals_2, buf5, reinterpret_tensor(buf7, (4, 1, 2, 2), (1, 16, 8, 4), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_2_t
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
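# Numerical check (not part of the original file): for a skew-symmetric A,
# the Cayley transform (I + A)^{-1}(I - A) is orthogonal.
if __name__ == "__main__":
    A = torch.randn(4, 4, dtype=torch.float64)
    A = A - A.T
    Q = _cayley(A)
    assert torch.allclose(Q @ Q.T, torch.eye(4, dtype=torch.float64),
        atol=1e-8)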
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I  # V collects the even-coefficient terms; U the odd ones
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
temp_zeros = torch.clone(R).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + L @ temp_zeros  # d(R @ R) = R @ dR + dR @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
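# The identity this squaring step exploits, illustrated with PyTorch's
# built-in exponential (not part of the original file):
# exp(A) == exp(A / 2) @ exp(A / 2).
if __name__ == "__main__":
    A = torch.randn(3, 3, dtype=torch.float64)
    half = torch.matrix_exp(A / 2)
    assert torch.allclose(half @ half, torch.matrix_exp(A), atol=1e-9)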
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
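# Finite-difference sanity check for the Frechet derivative (not part of the
# original file; uses torch.matrix_exp as the reference forward map).
if __name__ == "__main__":
    A = torch.randn(3, 3, dtype=torch.float64)
    A = 0.1 * (A - A.T)
    E = torch.randn(3, 3, dtype=torch.float64)
    h = 1e-7
    fd = (torch.matrix_exp(A + h * E) - torch.matrix_exp(A)) / h
    assert torch.allclose(fd, _expm_frechet_scaling_squaring(A, E), atol=1e-5)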
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
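# Hedged cross-check (not part of the original file): the scaling-and-squaring
# result should agree with PyTorch's built-in matrix exponential.
if __name__ == "__main__":
    A = torch.randn(3, 3, dtype=torch.float64)
    assert torch.allclose(_expm_scaling_squaring(A), torch.matrix_exp(A),
        atol=1e-8)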
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
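# Orthogonality check (not part of the original file): a product of
# Householder reflections is orthogonal up to the row-normalization eps.
if __name__ == "__main__":
    M = torch.randn(4, 4, dtype=torch.float64)
    H = householder_transform(M)
    assert torch.allclose(H @ H.T, torch.eye(4, dtype=torch.float64),
        atol=1e-4)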
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
For some chosen method of parametrizing orthogonal matrices, this
function outputs the required weights necessary to represent a
chosen initialization as a Pytorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
Should be 'exp' or 'cayley'
init : The matrix which should be represented. Should be
            'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
            possible if every element of ``stride`` equals 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
if init == 'haar' and set(stride) != {2}:
        # 'haar' initialization requires stride 2 in every dimension;
        # falling back to 'squeeze'.
init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
weight = np.array([[[0, p, p, 0, p, 0, 0, 0], [0, 0, 0, p,
0, p, 0, 0], [0, 0, 0, p, 0, 0, p, 0], [0, 0, 0, 0, 0,
0, 0, p], [0, 0, 0, 0, 0, p, p, 0], [0, 0, 0, 0, 0, 0,
0, p], [0, 0, 0, 0, 0, 0, 0, p], [0, 0, 0, 0, 0, 0, 0,
0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p], [0, 0,
0, -p, 0, -p, p - 1, 0], [0, 0, 0, -p, 0, p - 1, -p, 0],
[0, 0, 0, 0, 1 - p, 0, 0, -p], [0, 0, 0, 0, 0, -p, -p,
0], [0, 0, 0, 0, 0, 0, 0, -p], [0, 0, 0, 0, 0, 0, 0, -p
], [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
The channel multiplier, i.e. the number
by which the number of channels are multiplied (downsampling)
or divided (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
class InvertibleUpsampling2D(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_2_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_pair(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels // channel_multiplier
super(InvertibleUpsampling2D, self).__init__(*args,
low_channel_number=self.out_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def forward(self, x):
return F.conv_transpose2d(x, self.kernel, stride=self.stride,
groups=self.low_channel_number)
def inverse(self, x):
return F.conv2d(x, self.kernel, stride=self.stride, groups=self.
low_channel_number)
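# Hedged round-trip sketch (not part of the original file): upsampling with an
# orthogonal kernel followed by its inverse should reproduce the input.
if __name__ == "__main__":
    layer = InvertibleUpsampling2D(in_channels=4)
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(layer.inverse(layer(x)), x, atol=1e-5)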
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_2_t
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp6 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp7 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp0 = y0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x1 + 4 * y0), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x1 + 4 * y0), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_sub_0[grid(4, 4)](primals_1, buf0, buf5, 4,
4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf0, (4, 1, 2, 2), (4, 4, 2, 1), 0)
del buf0
triton_poi_fused_convolution_1[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
buf9 = extern_kernels.convolution(primals_2, buf8, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 8, 8), (64, 64, 8, 1))
del buf8
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
buf11 = buf10
del buf10
return buf9, primals_2, buf5, reinterpret_tensor(buf7, (4, 1, 2, 2), (1,
16, 8, 4), 0), buf11
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I  # V collects the even-coefficient terms; U the odd ones
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
temp_zeros = torch.clone(R).masked_scatter(matrices_mask, O)
                L = temp_eye @ L + L @ temp_zeros  # d(R @ R) = R @ dR + dR @ R
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
For some chosen method of parametrizing orthogonal matrices, this
function outputs the required weights necessary to represent a
chosen initialization as a Pytorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
Should be 'exp' or 'cayley'
init : The matrix which should be represented. Should be
            'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
            possible if every element of ``stride`` equals 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
if init == 'haar' and set(stride) != {2}:
        # 'haar' initialization requires stride 2 in every dimension;
        # falling back to 'squeeze'.
init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
weight = np.array([[[0, p, p, 0, p, 0, 0, 0], [0, 0, 0, p,
0, p, 0, 0], [0, 0, 0, p, 0, 0, p, 0], [0, 0, 0, 0, 0,
0, 0, p], [0, 0, 0, 0, 0, p, p, 0], [0, 0, 0, 0, 0, 0,
0, p], [0, 0, 0, 0, 0, 0, 0, p], [0, 0, 0, 0, 0, 0, 0,
0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p], [0, 0,
0, -p, 0, -p, p - 1, 0], [0, 0, 0, -p, 0, p - 1, -p, 0],
[0, 0, 0, 0, 1 - p, 0, 0, -p], [0, 0, 0, 0, 0, -p, -p,
0], [0, 0, 0, 0, 0, 0, 0, -p], [0, 0, 0, 0, 0, 0, 0, -p
], [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
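# A minimal sketch (illustrative, not part of the original module): the
# 'haar' initialization for stride (2, 2) produces one 4x4 weight per
# matrix, and the Cayley transform of its skew-symmetric part is orthogonal.
def _sketch_haar_init_is_orthogonal():
    w = __initialize_weight__(kernel_matrix_shape=(3, 4, 4),
        stride=(2, 2), method='cayley', init='haar')
    assert w.shape == (3, 4, 4)
    q = __calculate_kernel_matrix_cayley__(w)
    identity = torch.eye(4)
    assert torch.allclose(q[0].transpose(-1, -2) @ q[0], identity,
        atol=1e-05)
    return q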
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
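# A minimal sketch (illustrative, not part of the original module): the
# Cayley transform of a skew-symmetric matrix is orthogonal; the custom
# backward could additionally be probed with torch.autograd.gradcheck.
def _sketch_cayley_is_orthogonal():
    w = torch.randn(4, 4, dtype=torch.float64)
    a = w - w.T
    q = cayley.apply(a)
    identity = torch.eye(4, dtype=torch.float64)
    assert torch.allclose(q.T @ q, identity, atol=1e-08)
    return q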
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
The channel multiplier, i.e. the number
by which the number of channels are multiplied (downsampling)
or divided (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
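# A minimal sketch of the shape bookkeeping (illustrative, using the
# subclass defined below): 16 input channels with stride (2, 2) give 4 low
# channels, four 4x4 orthogonal blocks, and a grouped-conv weight of shape
# (16, 1, 2, 2).
def _sketch_resampling_kernel_shapes():
    layer = InvertibleUpsampling2DNew(in_channels=16, stride=2)
    assert layer.kernel_matrix.shape == (4, 4, 4)
    assert layer.kernel.shape == (16, 1, 2, 2)
    return layer.kernel.shape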
class InvertibleUpsampling2DNew(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_2_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_pair(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels // channel_multiplier
super(InvertibleUpsampling2DNew, self).__init__(*args,
low_channel_number=self.out_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def inverse(self, x):
return F.conv2d(x, self.kernel, stride=self.stride, groups=self.
low_channel_number)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| cetmann/iunets | InvertibleUpsampling2D | false | 15,030 | [
"MIT"
]
| 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
EPE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/66/c66hxpbagrcocdwcb457ompxihfzu23jo3ydcrdaz3uwb63isyhd.py
# Topologically Sorted Source Nodes: [sub, loss_map, sum_1, add, loss_map_1], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add]
# Source node to ATen node mapping:
# add => add
# loss_map => pow_1
# loss_map_1 => pow_2
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
triton_poi_fused_add_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_add_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 1e-06
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ek/cekbmmkf6jeivdyacikwn7uzkvx2g3akquy5d3t5m43oyey2rpi2.py
# Topologically Sorted Source Nodes: [sub, loss_map, sum_1, add, loss_map_1, mul], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# loss_map => pow_1
# loss_map_1 => pow_2
# mul => mul
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %arg2_1), kwargs = {})
triton_poi_fused_add_mul_pow_sub_sum_1 = async_compile.triton('triton_poi_fused_add_mul_pow_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, loss_map, sum_1, add, loss_map_1], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_pow_sub_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, loss_map, sum_1, add, loss_map_1, mul], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.mul]
triton_poi_fused_add_mul_pow_sub_sum_1.run(buf0, arg2_1, buf1, 256, grid=grid(256), stream=stream0)
del arg2_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.cpp_extension
class EPE(nn.Module):
def __init__(self):
super(EPE, self).__init__()
def forward(self, flow, gt, loss_mask):
loss_map = (flow - gt.detach()) ** 2
loss_map = (loss_map.sum(1, True) + 1e-06) ** 0.5
return loss_map * loss_mask
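# A minimal sketch (illustrative, not part of the original module): with
# flow == gt the squared difference vanishes, so every masked pixel
# reduces to the stabiliser sqrt(1e-06) = 1e-03.
def _sketch_epe_zero_error():
    epe = EPE()
    flow = torch.ones(1, 2, 3, 3)
    mask = torch.ones(1, 1, 3, 3)
    out = epe(flow, flow.clone(), mask)
    assert out.shape == (1, 1, 3, 3)
    assert torch.allclose(out, torch.full_like(out, 1e-03))
    return out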
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 1e-06
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sub_sum_0[grid(64)](arg1_1, arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_sub_sum_1[grid(256)](buf0, arg2_1,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del buf0
return buf1,
class EPENew(nn.Module):
def __init__(self):
super(EPENew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
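# A minimal sketch (illustrative; runs only on CUDA, since the kernels
# above are specialized to contiguous (4, 4, 4, 4) float32 inputs on
# cuda:0): the fused Triton path should match the eager formula.
def _sketch_epenew_matches_eager():
    if not torch.cuda.is_available():
        return None
    flow = torch.rand(4, 4, 4, 4, device='cuda')
    gt = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    ref = (((flow - gt) ** 2).sum(1, True) + 1e-06) ** 0.5 * mask
    out = EPENew()(flow, gt, mask)
    assert torch.allclose(out, ref, atol=1e-05)
    return out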
| P2Oileen/oh-my-face | EPE | false | 15,031 | [
"MIT"
]
| 45 | b73cb8ea713205bbf2bc1408145fa668c715359b | https://github.com/P2Oileen/oh-my-face/tree/b73cb8ea713205bbf2bc1408145fa668c715359b |
InvertibleDownsampling1D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ga/cgaakqg3ywxptwy54nvt4qkkvsycenvgmlvhsrdqfdz2ng44kcgy.py
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
# Source node to ATen node mapping:
# autograd_function_apply => add, eq, full_default, full_default_1, iota, sub_1, where
# skew_symmetric_matrix => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %permute), kwargs = {})
# %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %sub), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %sub), kwargs = {})
triton_poi_fused_add_eye_sub_0 = async_compile.triton('triton_poi_fused_add_eye_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 2], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 2
x2 = xindex
y3 = yindex
y1 = (yindex // 2)
tmp6 = tl.load(in_ptr0 + (x2 + (2*y3)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (y0 + (2*x2) + (4*y1)), xmask & ymask, eviction_policy='evict_last')
tmp0 = y0
tmp1 = x2
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x2 + (2*y3)), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x2 + (2*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ty/ctycmrgpokyxxuy4pplscmqrgmmnkumbz3n53iwz4pkcbhovg7w7.py
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# reshape => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%linalg_lu_solve,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 2], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = (yindex // 2)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (4*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (2*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 2), (4, 2, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 2), (4, 2, 1), torch.float32)
buf5 = empty_strided_cuda((4, 2, 2), (4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_sub_0.run(primals_1, buf0, buf5, 8, 2, grid=grid(8, 2), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [skew_symmetric_matrix, autograd_function_apply], Original ATen: [aten.sub, aten.eye, aten.add, aten.linalg_lu_factor_ex]
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.linalg_lu_solve]
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf7, buf8, 8, 2, grid=grid(8, 2), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (8, 1, 2), (2, 0, 1), 0), stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf9, (1, 8, 2), (16, 2, 1))
# Topologically Sorted Source Nodes: [], Original ATen: [aten.linalg_lu_solve]
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
del buf7
buf11 = buf10
del buf10
return (reinterpret_tensor(buf9, (8, 2), (2, 1), 0), buf5, reinterpret_tensor(buf8, (8, 1, 2), (2, 2, 1), 0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 2, 2), (4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_1_t
from torch.nn.modules.utils import _single
import torch.nn.functional as F
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
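# A minimal sketch (illustrative, not part of the original module): the
# induced 1-norm is the maximum absolute column sum, which agrees with
# torch.linalg.matrix_norm(A, ord=1) for 2-D inputs.
def _sketch_matrix_1_norm():
    a = torch.randn(6, 6, dtype=torch.float64)
    assert torch.allclose(matrix_1_norm(a), torch.linalg.matrix_norm(a,
        ord=1))
    b = torch.randn(3, 5, 5)
    assert matrix_1_norm(b).shape == (3,)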
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
temp_zeros = torch.clone(R).masked_scatter(matrices_mask, O)
            L = temp_eye @ L + L @ temp_zeros
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
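# A minimal sketch (illustrative, not part of the original module): with
# adjoint=False this routine returns the Frechet derivative d exp(A)[E];
# a central finite difference of _expm_scaling_squaring (defined below)
# should match it in float64.
def _sketch_expm_frechet_finite_difference(h=1e-06):
    a = torch.randn(4, 4, dtype=torch.float64)
    e = torch.randn(4, 4, dtype=torch.float64)
    dexp = _expm_frechet_scaling_squaring(a, e, adjoint=False)
    fd = (_expm_scaling_squaring(a + h * e) -
        _expm_scaling_squaring(a - h * e)) / (2 * h)
    assert torch.allclose(dexp, fd, atol=1e-04)
    return dexp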
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
    """Scaling-and-squaring algorithm for matrix exponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
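# A minimal sketch (illustrative, not part of the original module): the
# result should agree with PyTorch's built-in torch.matrix_exp, for
# single matrices and batches alike.
def _sketch_expm_matches_builtin():
    a = torch.randn(3, 4, 4, dtype=torch.float64)
    assert torch.allclose(_expm_scaling_squaring(a), torch.matrix_exp(a),
        atol=1e-08)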
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
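    # Note: the sum below runs over dim=-2, so this normalizes the
    # *columns* of `matrix` to (approximately) unit norm; eps keeps the
    # division well-defined for all-zero columns.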
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
        warn('n_reflections is set higher than the number of columns.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
For some chosen method of parametrizing orthogonal matrices, this
function outputs the required weights necessary to represent a
chosen initialization as a Pytorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
Should be 'exp' or 'cayley'
init : The matrix which should be represented. Should be
'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
            possible if every entry of ``stride`` is 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
    if init == 'haar' and set(stride) != {2}:
        # The 'haar' initialization is only defined for stride 2 in every
        # dimension; fall back to the 'squeeze' initialization instead.
        init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
weight = np.array([[[0, p, p, 0, p, 0, 0, 0], [0, 0, 0, p,
0, p, 0, 0], [0, 0, 0, p, 0, 0, p, 0], [0, 0, 0, 0, 0,
0, 0, p], [0, 0, 0, 0, 0, p, p, 0], [0, 0, 0, 0, 0, 0,
0, p], [0, 0, 0, 0, 0, 0, 0, p], [0, 0, 0, 0, 0, 0, 0,
0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
weight = np.array([[[0, 0, p, p], [0, 0, -p, -p], [0, 0, 0,
0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p], [0, 0,
0, -p, 0, -p, p - 1, 0], [0, 0, 0, -p, 0, p - 1, -p, 0],
[0, 0, 0, 0, 1 - p, 0, 0, -p], [0, 0, 0, 0, 0, -p, -p,
0], [0, 0, 0, 0, 0, 0, 0, -p], [0, 0, 0, 0, 0, 0, 0, -p
], [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
The channel multiplier, i.e. the number
by which the number of channels are multiplied (downsampling)
or divided (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
class InvertibleDownsampling1D(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_1_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_single(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels * channel_multiplier
super(InvertibleDownsampling1D, self).__init__(*args,
low_channel_number=self.in_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def forward(self, x):
return F.conv1d(x, self.kernel, stride=self.stride, groups=self.
low_channel_number)
def inverse(self, x):
return F.conv_transpose1d(x, self.kernel, stride=self.stride,
groups=self.low_channel_number)
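# A minimal sketch (illustrative, not part of the original module): each
# per-channel 2x2 kernel block is orthogonal, so the transposed
# convolution in inverse() undoes forward() up to float32 round-off.
def _sketch_downsampling_round_trip():
    layer = InvertibleDownsampling1D(in_channels=4, stride=2)
    x = torch.randn(2, 4, 8)
    y = layer(x)
    assert y.shape == (2, 8, 4)
    assert torch.allclose(layer.inverse(y), x, atol=1e-05)
    return y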
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import numpy as np
from warnings import warn
from typing import Union
from typing import Tuple
from torch.nn.common_types import _size_1_t
from torch.nn.modules.utils import _single
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_eye_sub_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 2
x2 = xindex
y3 = yindex
y1 = yindex // 2
tmp6 = tl.load(in_ptr0 + (x2 + 2 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (y0 + 2 * x2 + 4 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp0 = y0
tmp1 = x2
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp5 - tmp8
tl.store(out_ptr0 + (x2 + 2 * y3), tmp9, xmask & ymask)
tl.store(out_ptr1 + (x2 + 2 * y3), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 4 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 2 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 2), (4, 2, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 2), (4, 2, 1), torch.float32)
buf5 = empty_strided_cuda((4, 2, 2), (4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_sub_0[grid(8, 2)](primals_1, buf0, buf5, 8,
2, XBLOCK=2, YBLOCK=8, num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten.linalg_lu_factor_ex.default(buf0)
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf6 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf5)
buf7 = buf6
del buf6
buf8 = buf0
del buf0
triton_poi_fused_clone_1[grid(8, 2)](buf7, buf8, 8, 2, XBLOCK=2,
YBLOCK=8, num_warps=1, num_stages=1)
buf9 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (8, 1, 2), (2,
0, 1), 0), stride=(2,), padding=(0,), dilation=(1,), transposed
=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf9, (1, 8, 2), (16, 2, 1))
buf10 = torch.ops.aten.linalg_lu_solve.default(buf2, buf3, buf7)
del buf2
del buf3
del buf7
buf11 = buf10
del buf10
return reinterpret_tensor(buf9, (8, 2), (2, 1), 0
), buf5, reinterpret_tensor(buf8, (8, 1, 2), (2, 2, 1), 0
), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf11
def _cayley(A):
I = torch.eye(A.shape[-1], device=A.device)
LU = torch.lu(I + A, pivot=True)
return torch.lu_solve(I - A, *LU)
def _cayley_frechet(A, H, Q=None):
I = torch.eye(A.shape[-1], device=A.device)
if Q is None:
Q = _cayley(A)
_LU = torch.lu(I + A, pivot=True)
p = torch.lu_solve(Q, *_LU)
_LU = torch.lu(I - A, pivot=True)
q = torch.lu_solve(H, *_LU)
return 2.0 * q @ p
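# A minimal sketch (illustrative, not part of the original module):
# _cayley solves (I + A) Q = I - A, so the residual of that linear system
# should vanish; _cayley_frechet supplies the matching backward rule for
# the cayley autograd Function.
def _sketch_cayley_identity():
    a = torch.randn(4, 4, dtype=torch.float64)
    q = _cayley(a)
    identity = torch.eye(4, dtype=torch.float64)
    assert torch.allclose((identity + a) @ q, identity - a, atol=1e-08)
    return q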
def __calculate_kernel_matrix_cayley__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return cayley.apply(skew_symmetric_matrix)
def matrix_1_norm(A):
"""Calculates the 1-norm of a matrix or a batch of matrices.
Args:
A (torch.Tensor): Can be either of size (n,n) or (m,n,n).
Returns:
torch.Tensor : The 1-norm of A.
"""
norm, _indices = torch.max(torch.sum(torch.abs(A), axis=-2), axis=-1)
return norm
def _compute_scales(A):
"""Compute optimal parameters for scaling-and-squaring algorithm.
The constants used in this function are determined by the MATLAB
function found in
https://github.com/cetmann/pytorch_expm/blob/master/determine_frechet_scaling_constant.m
"""
norm = matrix_1_norm(A)
max_norm = torch.max(norm)
s = torch.zeros_like(norm)
if A.dtype == torch.float64:
if A.requires_grad:
ell = {(3): 0.010813385777848, (5): 0.199806320697895, (7):
0.783460847296204, (9): 1.782448623969279, (13):
4.740307543765127}
else:
ell = {(3): 0.014955852179582, (5): 0.253939833006323, (7):
0.950417899616293, (9): 2.097847961257068, (13):
5.371920351148152}
if max_norm >= ell[9]:
m = 13
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5, 7, 9]:
if max_norm < ell[m]:
magic_number = ell[m]
break
elif A.dtype == torch.float32:
if A.requires_grad:
ell = {(3): 0.30803304184533, (5): 1.482532614793145, (7):
3.248671755200478}
else:
ell = {(3): 0.425873001692283, (5): 1.880152677804762, (7):
3.92572478313866}
if max_norm >= ell[5]:
m = 7
magic_number = ell[m]
s = torch.relu_(torch.ceil(torch.log2_(norm / magic_number)))
else:
for m in [3, 5]:
if max_norm < ell[m]:
magic_number = ell[m]
break
return s, m
def _eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
For matrix M, the output is same shape as M, if M is a (n,n)-matrix.
If M is a batch of m matrices (i.e. a (m,n,n)-tensor), create a batch of
(n,n)-identity-matrices.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
def _expm_frechet_pade(A, E, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
if m >= 3:
M_2 = A @ E + E @ A
A_2 = A @ A
U = b[3] * A_2
V = b[2] * A_2
L_U = b[3] * M_2
L_V = b[2] * M_2
if m >= 5:
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
L_U = L_U + b[5] * M_4
L_V = L_V + b[4] * M_4
if m >= 7:
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
L_U = L_U + b[7] * M_6
L_V = L_V + b[6] * M_6
if m == 9:
M_8 = A_4 @ M_4 + M_4 @ A_4
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
L_U = L_U + b[9] * M_8
L_V = L_V + b[8] * M_8
U = U + b[1] * I
        V = V + b[0] * I
del I
L_U = A @ L_U
L_U = L_U + E @ U
U = A @ U
else:
M_2 = A @ E + E @ A
A_2 = A @ A
M_4 = A_2 @ M_2 + M_2 @ A_2
A_4 = A_2 @ A_2
M_6 = A_4 @ M_2 + M_4 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
L_W1 = b[13] * M_6 + b[11] * M_4 + b[9] * M_2
L_W2 = b[7] * M_6 + b[5] * M_4 + b[3] * M_2
L_Z1 = b[12] * M_6 + b[10] * M_4 + b[8] * M_2
L_Z2 = b[6] * M_6 + b[4] * M_4 + b[2] * M_2
L_W = A_6 @ L_W1 + M_6 @ W_1 + L_W2
L_U = A @ L_W + E @ W
L_V = A_6 @ L_Z1 + M_6 @ Z_1 + L_Z2
lu_decom = torch.lu(-U + V)
exp_A = torch.lu_solve(U + V, *lu_decom)
dexp_A = torch.lu_solve(L_U + L_V + (L_U - L_V) @ exp_A, *lu_decom)
return exp_A, dexp_A
def _square(s, R, L=None):
"""The `squaring` part of the `scaling-and-squaring` algorithm.
This works both for the forward as well as the derivative of
the matrix exponential.
"""
s_max = torch.max(s).int()
if s_max > 0:
I = _eye_like(R)
if L is not None:
O = torch.zeros_like(R)
indices = [(1) for k in range(len(R.shape) - 1)]
for i in range(s_max):
mask = i >= s
matrices_mask = mask.view(-1, *indices)
temp_eye = torch.clone(R).masked_scatter(matrices_mask, I)
if L is not None:
temp_zeros = torch.clone(R).masked_scatter(matrices_mask, O)
            L = temp_eye @ L + L @ temp_zeros
R = R @ temp_eye
del temp_eye, mask
if L is not None:
return R, L
else:
return R
def _expm_frechet_scaling_squaring(A, E, adjoint=False):
"""Numerical Fréchet derivative of matrix exponentiation.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
if adjoint is True:
A = torch.transpose(A, -1, -2)
s, m = _compute_scales(A)
if torch.max(s) > 0:
indices = [(1) for k in range(len(A.shape) - 1)]
scaling_factors = torch.pow(2, -s).view(-1, *indices)
A = A * scaling_factors
E = E * scaling_factors
exp_A, dexp_A = _expm_frechet_pade(A, E, m)
exp_A, dexp_A = _square(s, exp_A, dexp_A)
return dexp_A
def _expm_pade(A, m=7):
assert m in [3, 5, 7, 9, 13]
if m == 3:
b = [120.0, 60.0, 12.0, 1.0]
elif m == 5:
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0, 1.0]
elif m == 7:
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
56.0, 1.0]
elif m == 9:
b = [17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
30270240.0, 2162160.0, 110880.0, 3960.0, 90.0, 1.0]
elif m == 13:
b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0,
670442572800.0, 33522128640.0, 1323241920.0, 40840800.0,
960960.0, 16380.0, 182.0, 1.0]
I = _eye_like(A)
if m != 13:
U = b[1] * I
V = b[0] * I
if m >= 3:
A_2 = A @ A
U = U + b[3] * A_2
V = V + b[2] * A_2
if m >= 5:
A_4 = A_2 @ A_2
U = U + b[5] * A_4
V = V + b[4] * A_4
if m >= 7:
A_6 = A_4 @ A_2
U = U + b[7] * A_6
V = V + b[6] * A_6
if m == 9:
A_8 = A_4 @ A_4
U = U + b[9] * A_8
V = V + b[8] * A_8
U = A @ U
else:
A_2 = A @ A
A_4 = A_2 @ A_2
A_6 = A_4 @ A_2
W_1 = b[13] * A_6 + b[11] * A_4 + b[9] * A_2
W_2 = b[7] * A_6 + b[5] * A_4 + b[3] * A_2 + b[1] * I
W = A_6 @ W_1 + W_2
Z_1 = b[12] * A_6 + b[10] * A_4 + b[8] * A_2
Z_2 = b[6] * A_6 + b[4] * A_4 + b[2] * A_2 + b[0] * I
U = A @ W
V = A_6 @ Z_1 + Z_2
del A_2
if m >= 5:
del A_4
if m >= 7:
del A_6
if m == 9:
del A_8
R = torch.lu_solve(U + V, *torch.lu(-U + V))
del U, V
return R
def _expm_scaling_squaring(A):
"""Scaling-and-squaring algorithm for matrix eponentiation.
This is based on the observation that exp(A) = exp(A/k)^k, where
e.g. k=2^s. The exponential exp(A/(2^s)) is calculated by a diagonal
Padé approximation, where s is chosen based on the 1-norm of A, such
that certain approximation guarantees can be given. exp(A) is then
calculated by repeated squaring via exp(A/(2^s))^(2^s). This function
works both for (n,n)-tensors as well as batchwise for (m,n,n)-tensors.
"""
assert A.shape[-1] == A.shape[-2] and len(A.shape) in [2, 3]
s, m = _compute_scales(A)
if torch.max(s) > 0:
        indices = [1] * (len(A.shape) - 1)
A = A * torch.pow(2, -s).view(-1, *indices)
exp_A = _expm_pade(A, m)
exp_A = _square(s, exp_A)
return exp_A
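def _demo_expm_scaling_squaring():
    # Hedged sketch, not part of the original module: the scaling-and-
    # squaring result should agree with torch.matrix_exp on a small batch.
    A = torch.randn(2, 4, 4, dtype=torch.float64)
    assert torch.allclose(_expm_scaling_squaring(A), torch.matrix_exp(A),
        atol=1e-06)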
def __calculate_kernel_matrix_exp__(weight, **kwargs):
skew_symmetric_matrix = weight - torch.transpose(weight, -1, -2)
return expm.apply(skew_symmetric_matrix)
def eye_like(M, device=None, dtype=None):
"""Creates an identity matrix of the same shape as another matrix.
    For a matrix M, the output has the same shape as M if M is an
    (n,n)-matrix. If M is a batch of m matrices (i.e. an (m,n,n)-tensor),
    a batch of (n,n)-identity-matrices is created.
Args:
M (torch.Tensor) : A tensor of either shape (n,n) or (m,n,n), for
which either an identity matrix or a batch of identity matrices
of the same shape will be created.
device (torch.device, optional) : The device on which the output
will be placed. By default, it is placed on the same device
as M.
dtype (torch.dtype, optional) : The dtype of the output. By default,
it is the same dtype as M.
Returns:
torch.Tensor : Identity matrix or batch of identity matrices.
"""
assert len(M.shape) in [2, 3]
assert M.shape[-1] == M.shape[-2]
n = M.shape[-1]
if device is None:
device = M.device
if dtype is None:
dtype = M.dtype
eye = torch.eye(M.shape[-1], device=device, dtype=dtype)
if len(M.shape) == 2:
return eye
else:
m = M.shape[0]
return eye.view(-1, n, n).expand(m, -1, -1)
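def _demo_eye_like():
    # Hedged sketch, not part of the original module: for a batch of
    # matrices, eye_like returns a matching batch of identity matrices.
    M = torch.randn(5, 3, 3)
    E = eye_like(M)
    assert E.shape == M.shape
    assert torch.equal(E[0], torch.eye(3))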
def householder_matrix(unit_vector):
if unit_vector.shape[-1] != 1:
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
def normalize_matrix_rows(matrix, eps=1e-06):
norms = torch.sqrt(torch.sum(matrix ** 2, dim=-2, keepdim=True) + eps)
return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-06):
"""Implements a product of Householder transforms.
"""
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn('n_reflections is set higher than the number of rows.')
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(matrix.shape[-2], dtype=matrix.dtype, device=
matrix.device)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i + 1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output
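def _demo_householder_transform():
    # Hedged sketch, not part of the original module: a product of
    # Householder reflections is orthogonal (up to the small eps used in
    # the normalization), so Q @ Q^T should be close to the identity.
    Q = householder_transform(torch.randn(4, 4, dtype=torch.float64))
    assert torch.allclose(Q @ Q.transpose(-1, -2), torch.eye(4, dtype=
        torch.float64), atol=0.0001)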
def __calculate_kernel_matrix_householder__(weight, **kwargs):
n_reflections = kwargs.get('n_reflections', -1)
eps = kwargs.get('eps', 1e-06)
weight = weight[..., n_reflections:]
return householder_transform(weight, n_reflections, eps)
def __initialize_weight__(kernel_matrix_shape: 'Tuple[int, ...]', stride:
'Tuple[int, ...]', method: 'str'='cayley', init: 'str'='haar', dtype:
'str'='float32', *args, **kwargs):
"""Function which computes specific orthogonal matrices.
    For some chosen method of parametrizing orthogonal matrices, this
    function outputs the weights required to represent a chosen
    initialization as a PyTorch tensor of matrices.
Args:
kernel_matrix_shape : The output shape of the
orthogonal matrices. Should be (num_matrices, height, width).
stride : The stride for the invertible up- or
downsampling for which this matrix is to be used. The length
of ``stride`` should match the dimensionality of the data.
method : The method for parametrising orthogonal matrices.
Should be 'exp' or 'cayley'
init : The matrix which should be represented. Should be
            'squeeze', 'pixel_shuffle', 'haar' or 'random'. 'haar' is only
            possible if every entry of ``stride`` is 2.
dtype : Numpy dtype which should be used for the matrix.
*args: Variable length argument iterable.
**kwargs: Arbitrary keyword arguments.
Returns:
Tensor : Orthogonal matrices of shape ``kernel_matrix_shape``.
"""
dim = len(stride)
num_matrices = kernel_matrix_shape[0]
assert method in ['exp', 'cayley', 'householder']
if method == 'householder':
warn(
'Householder parametrization not fully implemented yet. Only random initialization currently working.'
)
init = 'random'
if init == 'random':
return torch.randn(kernel_matrix_shape)
    if init == 'haar' and set(stride) != {2}:
        # The 'haar' initialization requires a stride of 2 in every
        # dimension; fall back to the 'squeeze' initialization instead.
        init = 'squeeze'
if init == 'haar' and set(stride) == {2}:
if method == 'exp':
p = np.pi / 4
if dim == 1:
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
                weight = np.array([[[0, 0, p, p], [0, 0, -p, -p],
                                    [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
                weight = np.array([[[0, p, p, 0, p, 0, 0, 0],
                                    [0, 0, 0, p, 0, p, 0, 0],
                                    [0, 0, 0, p, 0, 0, p, 0],
                                    [0, 0, 0, 0, 0, 0, 0, p],
                                    [0, 0, 0, 0, 0, p, p, 0],
                                    [0, 0, 0, 0, 0, 0, 0, p],
                                    [0, 0, 0, 0, 0, 0, 0, p],
                                    [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
elif method == 'cayley':
if dim == 1:
p = -np.sqrt(2) / (2 - np.sqrt(2))
weight = np.array([[[0, p], [0, 0]]], dtype=dtype)
if dim == 2:
p = 0.5
                weight = np.array([[[0, 0, p, p], [0, 0, -p, -p],
                                    [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=dtype)
if dim == 3:
p = 1 / np.sqrt(2)
                weight = np.array([[[0, -p, -p, 0, -p, 0, 0, 1 - p],
                                    [0, 0, 0, -p, 0, -p, p - 1, 0],
                                    [0, 0, 0, -p, 0, p - 1, -p, 0],
                                    [0, 0, 0, 0, 1 - p, 0, 0, -p],
                                    [0, 0, 0, 0, 0, -p, -p, 0],
                                    [0, 0, 0, 0, 0, 0, 0, -p],
                                    [0, 0, 0, 0, 0, 0, 0, -p],
                                    [0, 0, 0, 0, 0, 0, 0, 0]]], dtype=dtype)
return torch.tensor(weight).repeat(num_matrices, 1, 1)
if init in ['squeeze', 'pixel_shuffle', 'zeros']:
if method == 'exp' or method == 'cayley':
return torch.zeros(*kernel_matrix_shape)
if type(init) is np.ndarray:
init = torch.tensor(init.astype(dtype))
if torch.is_tensor(init):
if len(init.shape) == 2:
init = init.reshape(1, *init.shape)
if init.shape[0] == 1:
init = init.repeat(num_matrices, 1, 1)
assert init.shape == kernel_matrix_shape
return init
else:
raise NotImplementedError('Unknown initialization.')
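def _demo_initialize_weight():
    # Hedged sketch, not part of the original module: for method='exp' and
    # stride (2, 2), the 'haar' initialization yields one 4x4 weight matrix
    # per requested matrix.
    w = __initialize_weight__((3, 4, 4), (2, 2), method='exp', init='haar')
    assert w.shape == (3, 4, 4)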
class cayley(Function):
"""Computes the Cayley transform.
"""
@staticmethod
def forward(ctx, M):
cayley_M = _cayley(M)
ctx.save_for_backward(M, cayley_M)
return cayley_M
@staticmethod
def backward(ctx, grad_out):
M, cayley_M = ctx.saved_tensors
dcayley_M = _cayley_frechet(M, grad_out, Q=cayley_M)
return dcayley_M
class expm(Function):
"""Computes the matrix exponential.
"""
@staticmethod
def forward(ctx, M):
expm_M = _expm_scaling_squaring(M)
ctx.save_for_backward(M)
return expm_M
@staticmethod
def backward(ctx, grad_out):
M = ctx.saved_tensors[0]
dexpm = _expm_frechet_scaling_squaring(M, grad_out, adjoint=True)
return dexpm
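def _demo_expm_autograd():
    # Hedged sketch, not part of the original module: expm is a custom
    # autograd Function, so gradients flow through the matrix exponential
    # via the Frechet derivative computed in backward.
    M = torch.randn(2, 3, 3, requires_grad=True)
    expm.apply(M).sum().backward()
    assert M.grad is not None and M.grad.shape == M.shape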
class OrthogonalResamplingLayer(torch.nn.Module):
"""Base class for orthogonal up- and downsampling operators.
:param low_channel_number:
Lower number of channels. These are the input
channels in the case of downsampling ops, and the output
channels in the case of upsampling ops.
:param stride:
The downsampling / upsampling factor for each dimension.
:param channel_multiplier:
        The channel multiplier, i.e. the factor by which the number
        of channels is multiplied (downsampling) or divided
        (upsampling).
:param method:
Which method to use for parametrizing orthogonal
matrices which are used as convolutional kernels.
"""
def __init__(self, low_channel_number: 'int', stride:
'Union[int, Tuple[int, ...]]', method: 'str'='cayley', init:
'Union[str, np.ndarray, torch.Tensor]'='haar', learnable: 'bool'=
True, init_kwargs: 'dict'=None, **kwargs):
super(OrthogonalResamplingLayer, self).__init__()
self.low_channel_number = low_channel_number
self.method = method
self.stride = stride
self.channel_multiplier = int(np.prod(stride))
self.high_channel_number = self.channel_multiplier * low_channel_number
if init_kwargs is None:
init_kwargs = {}
self.init_kwargs = init_kwargs
self.kwargs = kwargs
assert method in ['exp', 'cayley', 'householder']
if method == 'exp':
self.__calculate_kernel_matrix__ = __calculate_kernel_matrix_exp__
elif method == 'cayley':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_cayley__)
elif method == 'householder':
self.__calculate_kernel_matrix__ = (
__calculate_kernel_matrix_householder__)
self._kernel_matrix_shape = (self.low_channel_number,) + (self.
channel_multiplier,) * 2
self._kernel_shape = (self.high_channel_number, 1) + self.stride
self.weight = torch.nn.Parameter(__initialize_weight__(
kernel_matrix_shape=self._kernel_matrix_shape, stride=self.
stride, method=self.method, init=init, **self.init_kwargs))
self.weight.requires_grad = learnable
@property
def kernel_matrix(self):
"""The orthogonal matrix created by the chosen parametrisation method.
"""
return self.__calculate_kernel_matrix__(self.weight, **self.kwargs)
@property
def kernel(self):
"""The kernel associated with the invertible up-/downsampling.
"""
return self.kernel_matrix.reshape(*self._kernel_shape)
class InvertibleDownsampling1DNew(OrthogonalResamplingLayer):
def __init__(self, in_channels: 'int', stride: '_size_1_t'=2, method:
'str'='cayley', init: 'str'='haar', learnable: 'bool'=True, *args,
**kwargs):
stride = tuple(_single(stride))
channel_multiplier = int(np.prod(stride))
self.in_channels = in_channels
self.out_channels = in_channels * channel_multiplier
super(InvertibleDownsampling1DNew, self).__init__(*args,
low_channel_number=self.in_channels, stride=stride, method=
method, init=init, learnable=learnable, **kwargs)
def inverse(self, x):
return F.conv_transpose1d(x, self.kernel, stride=self.stride,
groups=self.low_channel_number)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| cetmann/iunets | InvertibleDownsampling1D | false | 15,032 | [
"MIT"
]
| 86 | 80ed7cce0e505a0396c42359eaf27819222d71f6 | https://github.com/cetmann/iunets/tree/80ed7cce0e505a0396c42359eaf27819222d71f6 |
MLP_G | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5n/c5nsofekij6a7ap52g7x25mzkhruvzcsi6l2zpm7nkpnzba6k7fc.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# h_1 => gt, mul, where
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b6/cb6p4coebyc36mfgscp5avnf6elbckuaiicncljn73yyl7dip5fo.py
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_2 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_6, buf5, 16, grid=grid(16), stream=stream0)
del primals_6
return (buf4, buf0, buf2, buf5, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_G(nn.Module):
def __init__(self, opt):
super(MLP_G, self).__init__()
self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, noise, att):
h = torch.cat((noise, att), 1)
h = self.lrelu(self.fc1(h))
h = self.relu(self.fc2(h))
return h
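def _demo_mlp_g():
    # Hedged sketch, not part of the original module: with attSize = nz =
    # ngh = resSize = 4, the generator maps a (batch, 4) noise tensor and
    # a (batch, 4) attribute tensor to a non-negative (batch, 4) output
    # (the last layer is a ReLU).
    netG = MLP_G(_mock_config(attSize=4, nz=4, ngh=4, resSize=4))
    out = netG(torch.rand(4, 4), torch.rand(4, 4))
    assert out.shape == (4, 4) and bool((out >= 0).all())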
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(attSize=4, nz=4, ngh=4, resSize=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
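# Note (added commentary, not generated output): the kernel below fuses
# torch.cat((noise, att), dim=1) for two (4, 4) inputs into one (4, 8)
# output. Lanes with column index x0 < 4 load from the first input, the
# remaining lanes load from the second input (shifted by -4), and
# tl.where selects the right value per lane.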
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_leaky_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(16)](buf4,
primals_6, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_6
return buf4, buf0, buf2, buf5, primals_5
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_GNew(nn.Module):
def __init__(self, opt):
super(MLP_GNew, self).__init__()
self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_1 = self.fc2.weight
primals_6 = self.fc2.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Huihui-z/CE-GZSL | MLP_G | false | 15,033 | [
"MIT"
]
| 58 | 7bf5358ac4727ea1dc2dc9dec2f453b014500bd8 | https://github.com/Huihui-z/CE-GZSL/tree/7bf5358ac4727ea1dc2dc9dec2f453b014500bd8 |
SqueezeExcite | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x_se], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x_se => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/o5/co5kpgkyaabh4nd7yz4gzpyl7x35mwdhgusbruykvtydzlq2lizg.py
# Topologically Sorted Source Nodes: [x_se_1, x_se_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_se_1 => convolution
# x_se_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hc/chcuw27vv75itefu6xswbdhopf2pq6oaj4bww4whelczhqtkghqr.py
# Topologically Sorted Source Nodes: [x_se_3, add, relu6, truediv, x], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# relu6 => clamp_max, clamp_min
# truediv => div
# x => mul
# x_se_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_add_convolution_div_hardtanh_mul_2 = async_compile.triton('triton_poi_fused_add_convolution_div_hardtanh_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_hardtanh_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rh/crhdgkbcu2rbqzaxws3536qfrpjhmp6d6zhyebzqejxvceodywdi.py
# Topologically Sorted Source Nodes: [x_se_3, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
# Source node to ATen node mapping:
# add => add
# x_se_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3.0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add, 6), kwargs = {})
# %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_add_convolution_hardtanh_backward_3 = async_compile.triton('triton_poi_fused_add_convolution_hardtanh_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_hardtanh_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_se], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_se_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_se_1, x_se_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_se_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_se_3, add, relu6, truediv, x], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
triton_poi_fused_add_convolution_div_hardtanh_mul_2.run(primals_1, buf4, primals_5, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_se_3, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
triton_poi_fused_add_convolution_hardtanh_backward_3.run(buf4, primals_5, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del primals_5
return (buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import product as product
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8.
    It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
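def _demo_make_divisible():
    # Hedged sketch, not part of the original module: channel counts are
    # rounded to the nearest multiple of `divisor`, never dropping below
    # 90% of the requested value.
    assert _make_divisible(1.0, 4) == 4
    assert _make_divisible(30, 8) == 32
    assert _make_divisible(22, 8) == 24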
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
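def _demo_hard_sigmoid():
    # Hedged sketch, not part of the original module: hard_sigmoid is
    # relu6(x + 3) / 6, a piecewise-linear gate that saturates at 0 below
    # x = -3 and at 1 above x = 3.
    x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
    expected = torch.tensor([0.0, 0.0, 0.5, 1.0, 1.0])
    assert torch.allclose(hard_sigmoid(x), expected)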
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
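def _demo_squeeze_excite():
    # Hedged sketch, not part of the original module: squeeze-and-excite
    # is a channel-wise gate, so the output shape matches the input shape.
    se = SqueezeExcite(in_chs=4)
    x = torch.rand(4, 4, 4, 4)
    assert se(x).shape == x.shape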
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chs': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
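# Note (added commentary, not generated output): the kernel below fuses the
# bias add of the second 1x1 convolution with the hard-sigmoid gate
# (clamp(x + 3, 0, 6) / 6, with the division folded into a multiply by
# 0.16666...) and the final channel-wise rescaling of the input tensor.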
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)](
primals_1, buf4, primals_5, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8.
    It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class SqueezeExciteNew(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExciteNew, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, input_0):
primals_2 = self.conv_reduce.weight
primals_3 = self.conv_reduce.bias
primals_4 = self.conv_expand.weight
primals_5 = self.conv_expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| chuanli11/SynergyNet | SqueezeExcite | false | 15,034 | [
"MIT"
]
| 82 | a8044d8dabbfb811d4299f59e64e0fb749027e86 | https://github.com/chuanli11/SynergyNet/tree/a8044d8dabbfb811d4299f59e64e0fb749027e86 |
BasicBlock_ins | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ye/cye7l2jaf362rrj43bugwtiqncxa3xnlfse2dg7bg4rqz2wqm2ew.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# out_1 => add, repeat, rsqrt, var_mean
# out_2 => relu
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_per_fused__native_batch_norm_legit_relu_repeat_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr4 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
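# Illustrative eager reference (not part of the generated module): the fused
# kernel above implements affine instance norm followed by ReLU. Per
# (sample, channel) pair it reduces the 16 spatial elements to mean/var,
# normalizes with rsqrt(var + 1e-05), applies weight/bias, and clamps at 0.
def _eager_instance_norm_relu(x, weight, bias, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    x_hat = (x - mean) * torch.rsqrt(var + eps)
    return torch.relu(x_hat * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1))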
# kernel path: runs/run_shard_0/inductor_cache/2h/c2hr43dson36ajal2z3sa3dla4lduhw4ruutyvsztayuunp3pw53.py
# Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_4 => add_2, repeat_2, rsqrt_1, var_mean_1
# out_6 => relu_1
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_6, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_8,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_16, 0), kwargs = {})
triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr4 + (r1 + (16*x0)), tmp33, xmask)
tl.store(out_ptr5 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
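# Illustrative eager reference (not part of the generated module): same
# normalization as above, but the residual input (in_ptr3) is added before the
# ReLU, and the kernel also stores `out <= 0` -- the boolean mask that
# aten.threshold_backward uses when back-propagating through the final ReLU.
def _eager_instance_norm_relu_residual(x, weight, bias, residual, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    x_hat = (x - mean) * torch.rsqrt(var + eps)
    out = torch.relu(x_hat * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1) + residual)
    return out, out <= 0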
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_repeat_0.run(primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_1.run(primals_6, buf7, primals_7, primals_1, buf8, buf9, buf13, buf14, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_6
del primals_7
return (buf13, primals_1, primals_2, primals_5, buf0, buf1, reinterpret_tensor(buf5, (16, ), (1, ), 0), buf6, buf7, buf8, reinterpret_tensor(buf12, (16, ), (1, ), 0), buf14, reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.multiprocessing
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock_ins(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock_ins, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.InstanceNorm2d(planes, affine=True,
track_running_stats=False)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.InstanceNorm2d(planes, affine=True,
track_running_stats=False)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
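# Illustrative usage (not part of the original source):
#   block = BasicBlock_ins(**get_init_inputs()[1])
#   y = block(get_inputs()[0])   # shape preserved: torch.Size([4, 4, 4, 4])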
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused affine instance-norm + ReLU: repeats the per-channel weight/bias across
# the batch, reduces each 16-element spatial plane to mean/var, normalizes with
# rsqrt(var + 1e-05), applies the affine transform, and clamps at zero. The
# bare `tl.full(...)` / `tl.where(...)` expressions below are no-op leftovers
# from dead-code elimination in this cleaned dump.
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr4 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
# Same fused instance-norm + ReLU, plus a residual add (in_ptr3) before the
# clamp; additionally emits the `out <= 0` mask consumed by
# aten.threshold_backward in the ReLU backward pass.
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_1(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3,
out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + (r1 + 16 * x0), tmp33, xmask)
tl.store(out_ptr5 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_repeat_0[grid(16)](
primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
del primals_4
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((16,), (1,), torch.float32)
buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_1[
grid(16)](primals_6, buf7, primals_7, primals_1, buf8, buf9,
buf13, buf14, buf12, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_6
del primals_7
return (buf13, primals_1, primals_2, primals_5, buf0, buf1,
reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf7, buf8,
reinterpret_tensor(buf12, (16,), (1,), 0), buf14,
reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0),
reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0))
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock_insNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock_insNew, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.InstanceNorm2d(planes, affine=True,
track_running_stats=False)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.InstanceNorm2d(planes, affine=True,
track_running_stats=False)
self.downsample = downsample
self.stride = stride
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.bn1.weight
primals_4 = self.bn1.bias
primals_5 = self.conv2.weight
primals_6 = self.bn2.weight
primals_7 = self.bn2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
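# Parity sketch (illustrative; assumes CUDA and the eager BasicBlock_ins above):
#   ref = BasicBlock_ins(4, 4).cuda(); new = BasicBlock_insNew(4, 4).cuda()
#   new.load_state_dict(ref.state_dict())
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   torch.testing.assert_close(new(x), ref(x), rtol=1e-4, atol=1e-4)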
| chiukin/RANet | BasicBlock_ins | false | 15,035 | ["Apache-2.0"] | 267 | 681a47d9b1f114653290678f02f2d3ecdf4010bc | https://github.com/chiukin/RANet/tree/681a47d9b1f114653290678f02f2d3ecdf4010bc |
ResBlock2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fv/cfvfxpmpur3qlmurffwz4u56tgvw75i4lbjvzd25ortunbobyxnh.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# out_1 => add, rsqrt, var_mean
# out_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_per_fused__native_batch_norm_legit_relu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp24 = tl.full([1, 1], 0, tl.int32)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp25, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
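# Illustrative eager reference (not part of the generated module): non-affine
# instance norm + ReLU, matching nn.InstanceNorm2d(planes) followed by ReLU.
def _eager_plain_instance_norm_relu(x, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    return torch.relu((x - mean) * torch.rsqrt(var + eps))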
# kernel path: runs/run_shard_0/inductor_cache/lk/clk53igo2wowtd6pq5zu23svybsi67ef5dmegrb3qnpketcev22z.py
# Topologically Sorted Source Nodes: [out_7, out_9], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_7 => add_2, rsqrt_2, var_mean_2
# out_9 => relu_2
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_10, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_13,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_21, 0), kwargs = {})
triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 + tmp24
tmp26 = tl.full([1, 1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(out_ptr2 + (r1 + (16*x0)), tmp27, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr4 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
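# Illustrative eager reference (not part of the generated module): the final
# fused step normalizes conv3's output, adds the block input as residual,
# applies ReLU, and records the `out <= 0` mask for the backward pass.
def _eager_plain_in_residual_relu(x, residual, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    out = torch.relu((x - mean) * torch.rsqrt(var + eps) + residual)
    return out, out <= 0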
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_0.run(buf0, buf1, buf5, buf4, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_per_fused__native_batch_norm_legit_relu_0.run(buf6, buf7, buf11, buf10, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_7, out_9], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1.run(buf12, primals_1, buf13, buf17, buf18, buf16, 16, 16, grid=grid(16), stream=stream0)
return (buf17, primals_1, primals_2, primals_3, primals_4, buf0, reinterpret_tensor(buf4, (16, ), (1, ), 0), buf5, buf6, reinterpret_tensor(buf10, (16, ), (1, ), 0), buf11, buf12, reinterpret_tensor(buf16, (16, ), (1, ), 0), buf18, reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.multiprocessing
class ResBlock2(nn.Module):
def __init__(self, input_feature, planes, dilated=1, group=1):
super(ResBlock2, self).__init__()
self.conv1 = nn.Conv2d(input_feature, planes, kernel_size=1, bias=
False, groups=group)
self.bn1 = nn.InstanceNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1 *
dilated, bias=False, dilation=dilated, groups=group)
self.bn2 = nn.InstanceNorm2d(planes)
self.conv3 = nn.Conv2d(planes, input_feature, kernel_size=1, bias=
False, groups=group)
self.bn3 = nn.InstanceNorm2d(input_feature)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_feature': 4, 'planes': 4}]
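# Illustrative usage (not part of the original source):
#   block = ResBlock2(**get_init_inputs()[1])
#   y = block(get_inputs()[0])   # 1x1 -> 3x3 -> 1x1 convs keep the shape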
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused non-affine instance norm + ReLU: per (sample, channel) mean/var over
# the 16 spatial elements, normalize with rsqrt(var + 1e-05), clamp at zero.
# Bare `tl.full(...)` / `tl.where(...)` expressions below are no-op leftovers
# from dead-code elimination in this cleaned dump.
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0,
out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp24 = tl.full([1, 1], 0, tl.int32)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp25, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
# Fused non-affine instance norm + residual add + ReLU; also emits the
# `out <= 0` mask consumed by aten.threshold_backward.
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(
        in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
        rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 + tmp24
tmp26 = tl.full([1, 1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(out_ptr2 + (r1 + 16 * x0), tmp27, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_0[grid(16)](buf0,
buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_relu_0[grid(16)](buf6,
buf7, buf11, buf10, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1[
grid(16)](buf12, primals_1, buf13, buf17, buf18, buf16, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
return (buf17, primals_1, primals_2, primals_3, primals_4, buf0,
reinterpret_tensor(buf4, (16,), (1,), 0), buf5, buf6,
reinterpret_tensor(buf10, (16,), (1,), 0), buf11, buf12,
reinterpret_tensor(buf16, (16,), (1,), 0), buf18,
reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0),
reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0),
reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ResBlock2New(nn.Module):
def __init__(self, input_feature, planes, dilated=1, group=1):
super(ResBlock2New, self).__init__()
self.conv1 = nn.Conv2d(input_feature, planes, kernel_size=1, bias=
False, groups=group)
self.bn1 = nn.InstanceNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1 *
dilated, bias=False, dilation=dilated, groups=group)
self.bn2 = nn.InstanceNorm2d(planes)
self.conv3 = nn.Conv2d(planes, input_feature, kernel_size=1, bias=
False, groups=group)
self.bn3 = nn.InstanceNorm2d(input_feature)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
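# Note (illustrative): ResBlock2's InstanceNorm2d layers are created without
# affine parameters, so `call` only receives the three conv weights; the
# normalization math is folded into the fused Triton kernels above.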
| chiukin/RANet | ResBlock2 | false | 15,036 | ["Apache-2.0"] | 267 | 681a47d9b1f114653290678f02f2d3ecdf4010bc | https://github.com/chiukin/RANet/tree/681a47d9b1f114653290678f02f2d3ecdf4010bc |
FPNHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
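# The pointwise kernel above is just an in-place ReLU over the 256 contiguous
# outputs of the first conv; in eager PyTorch the equivalent is simply
#   torch.relu_(buf)   # or buf.clamp_(min=0)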
# kernel path: runs/run_shard_0/inductor_cache/w3/cw3zrmvca7grv74jw3rs72gt6ae2mq5prncci372h3zwksqmyouw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
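# Same in-place ReLU, but this variant also writes the boolean mask
#   mask = buf <= 0
# which aten.threshold_backward consumes to zero gradients in the ReLU backward.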
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, primals_3, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class FPNHead(nn.Module):
def __init__(self, num_in, num_mid, num_out):
super().__init__()
self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1,
bias=False)
self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, x):
x = nn.functional.relu(self.block0(x), inplace=True)
x = nn.functional.relu(self.block1(x), inplace=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_mid': 4, 'num_out': 4}]
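# Illustrative usage (not part of the original source):
#   head = FPNHead(**get_init_inputs()[1])
#   y = head(get_inputs()[0])   # two bias-free 3x3 convs, each ReLU-activated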
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# In-place ReLU over the conv output (element-wise max with 0).
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
# In-place ReLU that also stores the `out <= 0` boolean mask for the backward.
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3, buf4,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1, buf4
class FPNHeadNew(nn.Module):
def __init__(self, num_in, num_mid, num_out):
super().__init__()
self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1,
bias=False)
self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, input_0):
primals_1 = self.block0.weight
primals_3 = self.block1.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
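# Note (illustrative): both convs are built with bias=False, so `call` only
# threads the two weight tensors and the input through the compiled graph.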
| choprahetarth/DeblurGANv2 | FPNHead | false | 15,037 | ["BSD-3-Clause"] | 321 | e36dc2fef169b8a37036abe62192b6a925fb6c81 | https://github.com/choprahetarth/DeblurGANv2/tree/e36dc2fef169b8a37036abe62192b6a925fb6c81 |
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vp/cvpfqtmyqyejplddg35qfvsydx2nitayg4a7ndcskm65y477g3hn.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [2], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
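# Illustrative eager reference (not part of the generated module): the kernel
# fuses the temperature-scaled softmax numerator with the shifted logits that
# the log-softmax kernel consumes later. With temperature = 4 (the constant
# divisor in the graph fragment above):
def _eager_scaled_softmax_parts(attn, temperature=4.0):
    logits = (attn - attn.amax(dim=2, keepdim=True)) / temperature
    return logits.exp(), logits  # exp -> out_ptr0, shifted logits -> out_ptr1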
# kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [2], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dp/cdprejhdxjipmbhepumj5lwmh7q3n4bwayslcvlayzluxrhcsubr.py
# Topologically Sorted Source Nodes: [log_attn], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_attn => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
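# Hedged reference sketch (illustrative assumption, hypothetical helper name):
# the kernel above applies the identity log_softmax(x) = x - log(sum_j exp(x_j))
# to the already max-shifted logits produced by the first kernel, so the exp()
# calls here cannot overflow:
def _reference_log_softmax_from_scaled(scaled):
    return scaled - torch.log(torch.exp(scaled).sum(dim=2, keepdim=True))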
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, buf4, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_attn], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf4
return (buf3, buf2, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'temperature': 4}]
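# Minimal usage sketch (assumption, mirroring get_inputs/get_init_inputs
# above; _example is a hypothetical name):
def _example():
    module = ScaledDotProductAttention(temperature=4).eval()  # eval: dropout off
    q, k, v = get_inputs()
    output, attn, log_attn = module(q, k, v)
    # output is (4, 4, 4); with dropout off, each row of attn sums to 1 and
    # attn equals log_attn.exp().
    return output, attn, log_attn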
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf4
return buf3, buf2, buf5
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1], output[2]
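# Hedged sanity sketch (assumption, hypothetical helper name): the compiled
# call() traces inference, so the eager module's nn.Dropout never appears in
# the graph and temperature=4 is baked in as the 0.25 multiplier inside
# triton_poi_fused__softmax_0. The invariants checked below follow directly
# from the kernels themselves:
def _sanity_check():
    q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    output, attn, log_attn = ScaledDotProductAttentionNew(temperature=4)(q, k, v)
    assert output.shape == attn.shape == log_attn.shape == (4, 4, 4)
    # softmax rows normalize to 1, and log_attn is the matching log-softmax
    assert torch.allclose(attn.sum(dim=2), torch.ones(4, 4, device='cuda'), atol=1e-6)
    assert torch.allclose(log_attn.exp(), attn, atol=1e-6)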
| cjy97/FEAT | ScaledDotProductAttention | false | 15,038 | [
"MIT"
]
| 330 | 9d48b254bc5f0a2211c2aad0a60388a8a2c8081c | https://github.com/cjy97/FEAT/tree/9d48b254bc5f0a2211c2aad0a60388a8a2c8081c |
MFBFusion | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xt/cxtcnkiv6sa3ulwq4pnwdxnxp5jwy7lem53u66doekf4oekg437g.py
# Topologically Sorted Source Nodes: [mm_sumpool], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# mm_sumpool => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_4, [3], True), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
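# Hedged reference sketch (illustrative assumption, hypothetical helper name):
# each of the 16 programs above multiplies one 16-element row of the two
# projections and reduces it to a scalar, fusing mm_eltwise with the sum-pool
# over the trailing dimension, i.e. mm_eltwise.view(16, 16).sum(dim=1) for
# these (4, 4, 4, 4) inputs:
def _reference_mm_sumpool(text_proj, image_proj):
    # text_proj / image_proj: the (64, 4) addmm outputs buf0 / buf1 below
    return (text_proj * image_proj).reshape(16, 16).sum(dim=1)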
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [text_proj], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [image_proj], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [mm_sumpool], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(buf0, buf1, buf2, 16, 16, grid=grid(16), stream=stream0)
return (reinterpret_tensor(buf2, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import time
import torch
from torch import nn
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
self.model_name = str(type(self))
def load(self, path):
self.load_state_dict(torch.load(path))
def save(self, name=None):
if name is None:
prefix = 'checkpoints/' + self.model_name + '_'
name = time.strftime(prefix + '%m%d_%H:%M:%S.pth')
torch.save(self.state_dict(), name)
return name
def forward(self, *input):
raise NotImplementedError
class MFBFusion(BaseModel):
def __init__(self, mfb_fusion_option: 'MFBFusionOption'):
super(MFBFusion, self).__init__()
self.out_dim = mfb_fusion_option.outdim
self.linear1 = nn.Linear(mfb_fusion_option.text_size,
mfb_fusion_option.joint_emb_size)
self.linear2 = nn.Linear(mfb_fusion_option.image_size,
mfb_fusion_option.joint_emb_size)
self.dropout = nn.Dropout(p=mfb_fusion_option.dropout)
def forward(self, text_feat, image_feat):
batch_size = text_feat.size(0)
text_proj = self.linear1(text_feat)
image_proj = self.linear2(image_feat)
mm_eltwise = torch.mul(text_proj, image_proj)
mm_drop = self.dropout(mm_eltwise)
mm_resh = mm_drop.view(batch_size, 1, self.out_dim, -1)
mm_sumpool = torch.sum(mm_resh, 3, keepdim=True)
mfb_out = torch.squeeze(mm_sumpool)
return mfb_out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'mfb_fusion_option': _mock_config(outdim=4, text_size=4,
joint_emb_size=4, image_size=4, dropout=0.5)}]
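# Minimal usage sketch (assumption, mirroring the helpers above; _example is
# a hypothetical name):
def _example():
    option = _mock_config(outdim=4, text_size=4, joint_emb_size=4,
        image_size=4, dropout=0.5)
    fusion = MFBFusion(option).eval()  # eval() so dropout is a no-op
    text_feat, image_feat = get_inputs()
    return fusion(text_feat, image_feat)  # squeezed (4, 4) sum-pooled fusion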
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import time
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(16)](buf0, buf1, buf2, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
return reinterpret_tensor(buf2, (4, 4), (4, 1), 0), reinterpret_tensor(
primals_1, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6,
(64, 4), (4, 1), 0), buf1
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
self.model_name = str(type(self))
def load(self, path):
self.load_state_dict(torch.load(path))
def save(self, name=None):
if name is None:
prefix = 'checkpoints/' + self.model_name + '_'
name = time.strftime(prefix + '%m%d_%H:%M:%S.pth')
torch.save(self.state_dict(), name)
return name
def forward(self, *input):
raise NotImplementedError
class MFBFusionNew(BaseModel):
def __init__(self, mfb_fusion_option: 'MFBFusionOption'):
super(MFBFusionNew, self).__init__()
self.out_dim = mfb_fusion_option.outdim
self.linear1 = nn.Linear(mfb_fusion_option.text_size,
mfb_fusion_option.joint_emb_size)
self.linear2 = nn.Linear(mfb_fusion_option.image_size,
mfb_fusion_option.joint_emb_size)
self.dropout = nn.Dropout(p=mfb_fusion_option.dropout)
def forward(self, input_0, input_1):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
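# Hedged usage sketch (assumption): the compiled graph is an inference trace,
# so nn.Dropout(p=0.5) is elided and outputs only match the eager module in
# eval mode. SimpleNamespace stands in for the MFBFusionOption config object:
def _example():
    from types import SimpleNamespace
    option = SimpleNamespace(outdim=4, text_size=4, joint_emb_size=4,
        image_size=4, dropout=0.5)
    fusion = MFBFusionNew(option).cuda()
    return fusion(torch.rand(4, 4, 4, 4, device='cuda'),
        torch.rand(4, 4, 4, 4, device='cuda'))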
| chorseng/UMD | MFBFusion | false | 15,039 | [
"MIT"
]
| 48 | 680681fea76abcea02ff5f351727bcbb468c372a | https://github.com/chorseng/UMD/tree/680681fea76abcea02ff5f351727bcbb468c372a |
SimpleConvNetBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lp/clp5koy47ebnhv762u6de7t7ris3q4qlppwpsulnp65uy2dziuza.py
# Topologically Sorted Source Nodes: [y, y_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# y => convolution
# y_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ll/cll6dbzw5olx6lxt7vekbqx26bfcbry3sslqjtv73fvz7wy6k5vj.py
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# y_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (9*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (9*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (3 + (9*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + (9*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp16, xmask)
''', device_str='cuda')
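# Hedged reading of the kernel above (assumption, hypothetical helper name):
# the ReLU output is 4x4x3x3, so MaxPool2d(2) keeps only the top-left 2x2
# window of each 3x3 plane; the loads at flat offsets 0, 1, 3 and 4 are
# exactly that window, and out_ptr1 records the winning position, matching
# aten.max_pool2d_with_indices:
def _reference_pool(y):
    return torch.nn.functional.max_pool2d(y, 2)  # (4, 4, 3, 3) -> (4, 4, 1, 1)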
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [y, y_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 144, grid=grid(144), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int8)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 16, grid=grid(16), stream=stream0)
return (buf2, primals_1, primals_3, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SimpleConvNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel):
nn.Module.__init__(self)
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel, padding=1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(2)
def forward(self, x):
y = self.conv(x)
y = self.relu(y)
y = self.maxpool(y)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel': 4}]
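# Hedged shape walkthrough (assumption, following the 4/4/4 init above):
# (4, 4, 4, 4) -> Conv2d(kernel=4, padding=1) -> (4, 4, 3, 3) -> ReLU ->
# MaxPool2d(2) -> (4, 4, 1, 1). _example is a hypothetical name:
def _example():
    block = SimpleConvNetBlock(in_channels=4, out_channels=4, kernel=4)
    y = block(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 1, 1)
    return y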
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 9 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 9 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (3 + 9 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + 9 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(144)](buf1, primals_2, 144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(16)](buf1, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf2, primals_1, primals_3, buf1, buf3
class SimpleConvNetBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel):
nn.Module.__init__(self)
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel, padding=1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(2)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
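# Minimal CUDA usage sketch (assumption, hypothetical name): the compiled
# block yields the same (4, 4, 1, 1) pooled output shape as the eager block
# above; there is no dropout here, so no train/eval caveat applies.
def _example():
    block = SimpleConvNetBlockNew(in_channels=4, out_channels=4, kernel=4).cuda()
    return block(torch.rand(4, 4, 4, 4, device='cuda'))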
| cle-ros/RoutingNetworks | SimpleConvNetBlock | false | 15,040 | [
"Apache-2.0"
]
| 63 | 0f1fe1221c67a224a02bca6247d3c4488ede0a04 | https://github.com/cle-ros/RoutingNetworks/tree/0f1fe1221c67a224a02bca6247d3c4488ede0a04 |
PositionwiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7b/c7bsbu5nqjwno7oolhruidochm2rierdki7fkzahol2dvs7dgv5t.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
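# Hedged reference sketch (illustrative assumption, hypothetical helper name):
# the kernel above computes the per-row LayerNorm statistics that the next
# kernel consumes: the mean over the last dimension and rsqrt(var + eps) with
# eps=1e-06 and biased variance (correction=0 in the graph above):
def _reference_layernorm_stats(x):
    mean = x.mean(dim=3, keepdim=True)
    rstd = torch.rsqrt(x.var(dim=3, unbiased=False, keepdim=True) + 1e-06)
    return mean, rstd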
# kernel path: runs/run_shard_0/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/62/c622us6etqvroftqelxdgdedtcxkzuvbkchqjjtjl3nrhqvihz22.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add_2
# add_1 => add_3
# mul => mul_2
# mul_1 => mul_3
# mul_2 => mul_4
# mul_3 => mul_5
# pow_1 => pow_1
# tanh => tanh
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_4,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_3), kwargs = {})
triton_poi_fused_add_mul_pow_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
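# Hedged note (assumption about provenance only, hypothetical helper name):
# the kernel above is the tanh approximation of GELU,
#   gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
# with sqrt(2/pi) pre-folded to the constant 0.7978845608028654 and x**3
# expanded into two multiplies (tmp3, tmp4) instead of a pow call:
def _reference_gelu(x):
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * (x + 0.044715 * x * x * x)))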
# kernel path: runs/run_shard_0/inductor_cache/5q/c5qmnkuxezgezseizmolw3mx24fyy6xp3cfoz3egpqwcprxgwjre.py
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_2 => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
triton_poi_fused_add_mul_pow_tanh_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, primals_7, primals_3, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
        d_model (int): the size of the input for the first layer of the FFN.
        d_ff (int): the hidden layer size of the second layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
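# Minimal usage sketch (assumption, mirroring the helpers above; _example is
# a hypothetical name): a pre-norm residual FFN block, so the output keeps
# the input's shape.
def _example():
    ffn = PositionwiseFeedForward(d_model=4, d_ff=4).eval()  # eval: dropout off
    x, = get_inputs()
    y = ffn(x)
    assert y.shape == x.shape
    return y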
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
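# Kernel 2 of 4: applies the normalization, (x - mean) * rstd, followed by the
# LayerNorm affine transform with weight (in_ptr3) and bias (in_ptr4).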
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
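# Kernel 3 of 4: the tanh-approximate GELU, 0.5 * x * (1 + tanh(sqrt(2/pi) *
# (x + 0.044715 * x**3))); the constant 0.7978845608028654 is sqrt(2/pi).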
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
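# Kernel 4 of 4: fused epilogue adding the w_2 bias (in_ptr0, broadcast over
# the last dimension) and the residual input (in_ptr1) in place.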
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
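# Orchestration: LayerNorm stats -> normalize + affine -> addmm (w_1) -> GELU ->
# mm (w_2) -> fused bias + residual add, matching the eager forward below.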
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5,
            reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_tanh_2[grid(256)](buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
    return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
        buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6,
        primals_4)
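# Note: only the first returned tensor (buf6) is consumed by forward(); the
# remaining tensors mirror what Inductor saves for a potential backward pass.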
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
        d_model (int): the size of the input to the first layer of the FFN.
        d_ff (int): the hidden layer size of the second layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
        primals_4 = self.w_1.weight
        primals_5 = self.w_1.bias
        primals_6 = self.w_2.weight
        primals_7 = self.w_2.bias
        primals_1 = self.layer_norm.weight
        primals_2 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
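# Hedged equivalence sketch (illustrative only; `_check_against_eager` is a
# hypothetical helper). In this compiled inference path dropout is elided, so
# the output should match the eager composition of the same submodules.
def _check_against_eager():
    torch.manual_seed(0)
    m = PositionwiseFeedForwardNew(d_model=4, d_ff=4).cuda().eval()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = m.w_2(gelu(m.w_1(m.layer_norm(x)))) + x
    assert torch.allclose(m(x), ref, atol=1e-5)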
| clairett/fast-bert | PositionwiseFeedForward | false | 15,041 | [
"Apache-2.0"
]
| 1,542 | 506771b930aa70e7ca2852e5e8ebb14656d97bfa | https://github.com/clairett/fast-bert/tree/506771b930aa70e7ca2852e5e8ebb14656d97bfa |
SparsemaxBisect | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/no/cnoevxmi56p67zph5bcvud6uollxe7h3vjriggenmd3qdtjq74rl.py
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, tau_m, sub_7, p_m, sum_2, f_m, sub_4, clamp, sum_1, f_lo, mul_2, tau_lo_1, dm_2, tau_m_1, sub_9, p_m_1, sum_3, f_m_1, mul_3, tau_lo_2, dm_3, tau_m_2, sub_11, p_m_2, sum_4, f_m_2, mul_4, tau_lo_3, dm_4, tau_m_3, sub_13, p_m_3, sum_5, f_m_3, mul_5, tau_lo_4, dm_5, tau_m_4, sub_15, p_m_4, sum_6, f_m_4, mul_6, tau_lo_5, dm_6, tau_m_5, sub_17, p_m_5, sum_7, f_m_5, mul_7, tau_lo_6, dm_7, tau_m_6, sub_19, p_m_6, sum_8, f_m_6, mul_8, tau_lo_7, dm_8, tau_m_7, sub_21, p_m_7, sum_9, f_m_7, mul_9, tau_lo_8, dm_9, tau_m_8, sub_23, p_m_8, sum_10, f_m_8, mul_10, tau_lo_9, dm_10, tau_m_9, sub_25, p_m_9, sum_11, f_m_9, mul_11, tau_lo_10, dm_11, tau_m_10, sub_27, p_m_10, sum_12, f_m_10, mul_12, tau_lo_11, dm_12, tau_m_11, sub_29, p_m_11, sum_13, f_m_11, mul_13, tau_lo_12, dm_13, tau_m_12, sub_31, p_m_12, sum_14, f_m_12, mul_14, tau_lo_13, dm_14, tau_m_13, sub_33, p_m_13, sum_15, f_m_13, mul_15, tau_lo_14, dm_15, tau_m_14, sub_35, p_m_14, sum_16, f_m_14, mul_16, tau_lo_15, dm_16, tau_m_15, sub_37, p_m_15, sum_17, f_m_15, mul_17, tau_lo_16, dm_17, tau_m_16, sub_39, p_m_16, sum_18, f_m_16, mul_18, tau_lo_17], Original ATen: [aten.max, aten.sub, aten.div, aten.add, aten.clamp, aten.sum, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm => sub_6
# dm_1 => div
# dm_10 => div_9
# dm_11 => div_10
# dm_12 => div_11
# dm_13 => div_12
# dm_14 => div_13
# dm_15 => div_14
# dm_16 => div_15
# dm_17 => div_16
# dm_2 => div_1
# dm_3 => div_2
# dm_4 => div_3
# dm_5 => div_4
# dm_6 => div_5
# dm_7 => div_6
# dm_8 => div_7
# dm_9 => div_8
# f_lo => sub_5
# f_m => sub_8
# f_m_1 => sub_10
# f_m_10 => sub_28
# f_m_11 => sub_30
# f_m_12 => sub_32
# f_m_13 => sub_34
# f_m_14 => sub_36
# f_m_15 => sub_38
# f_m_16 => sub_40
# f_m_2 => sub_12
# f_m_3 => sub_14
# f_m_4 => sub_16
# f_m_5 => sub_18
# f_m_6 => sub_20
# f_m_7 => sub_22
# f_m_8 => sub_24
# f_m_9 => sub_26
# max_1 => max_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_12 => mul_12
# mul_13 => mul_13
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_17 => mul_17
# mul_18 => mul_18
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# p_m => clamp_min_1
# p_m_1 => clamp_min_2
# p_m_10 => clamp_min_11
# p_m_11 => clamp_min_12
# p_m_12 => clamp_min_13
# p_m_13 => clamp_min_14
# p_m_14 => clamp_min_15
# p_m_15 => clamp_min_16
# p_m_16 => clamp_min_17
# p_m_2 => clamp_min_3
# p_m_3 => clamp_min_4
# p_m_4 => clamp_min_5
# p_m_5 => clamp_min_6
# p_m_6 => clamp_min_7
# p_m_7 => clamp_min_8
# p_m_8 => clamp_min_9
# p_m_9 => clamp_min_10
# sub_11 => sub_11
# sub_13 => sub_13
# sub_15 => sub_15
# sub_17 => sub_17
# sub_19 => sub_19
# sub_21 => sub_21
# sub_23 => sub_23
# sub_25 => sub_25
# sub_27 => sub_27
# sub_29 => sub_29
# sub_31 => sub_31
# sub_33 => sub_33
# sub_35 => sub_35
# sub_37 => sub_37
# sub_39 => sub_39
# sub_4 => sub_4
# sub_7 => sub_7
# sub_9 => sub_9
# sum_1 => sum_1
# sum_10 => sum_10
# sum_11 => sum_11
# sum_12 => sum_12
# sum_13 => sum_13
# sum_14 => sum_14
# sum_15 => sum_15
# sum_16 => sum_16
# sum_17 => sum_17
# sum_18 => sum_18
# sum_2 => sum_2
# sum_3 => sum_3
# sum_4 => sum_4
# sum_5 => sum_5
# sum_6 => sum_6
# sum_7 => sum_7
# sum_8 => sum_8
# sum_9 => sum_9
# tau_hi => sub_3
# tau_lo => sub_2
# tau_lo_1 => where
# tau_lo_10 => where_9
# tau_lo_11 => where_10
# tau_lo_12 => where_11
# tau_lo_13 => where_12
# tau_lo_14 => where_13
# tau_lo_15 => where_14
# tau_lo_16 => where_15
# tau_lo_17 => where_16
# tau_lo_2 => where_1
# tau_lo_3 => where_2
# tau_lo_4 => where_3
# tau_lo_5 => where_4
# tau_lo_6 => where_5
# tau_lo_7 => where_6
# tau_lo_8 => where_7
# tau_lo_9 => where_8
# tau_m => add
# tau_m_1 => add_1
# tau_m_10 => add_10
# tau_m_11 => add_11
# tau_m_12 => add_12
# tau_m_13 => add_13
# tau_m_14 => add_14
# tau_m_15 => add_15
# tau_m_16 => add_16
# tau_m_2 => add_2
# tau_m_3 => add_3
# tau_m_4 => add_4
# tau_m_5 => add_5
# tau_m_6 => add_6
# tau_m_7 => add_7
# tau_m_8 => add_8
# tau_m_9 => add_9
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 0.25), kwargs = {})
# %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %sub_2), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, 2), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, %div), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_1, [-1]), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_2, 1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_4, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_5 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %sub_5), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze, %add, %sub_2), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %div_1), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_1), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_2, [-1]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %sub_5), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_1, %add_1, %where), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %div_2), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_2), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_11, 0), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_3, [-1]), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_4, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %sub_5), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %add_2, %where_1), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_2, %div_3), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_3), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_13, 0), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_4, [-1]), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %sub_5), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_3, %add_3, %where_2), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_3, %div_4), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_4), kwargs = {})
# %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_5, [-1]), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_6, 1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %sub_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_4, %add_4, %where_3), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %div_5), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_5), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_17, 0), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_6, [-1]), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_7, 1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %sub_5), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_5, %add_5, %where_4), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %div_6), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_6), kwargs = {})
# %clamp_min_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_19, 0), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_7, [-1]), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_8, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %sub_5), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_6, %add_6, %where_5), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_6, %div_7), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_7), kwargs = {})
# %clamp_min_8 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_21, 0), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_8, [-1]), kwargs = {})
# %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_9, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %sub_5), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_7, %add_7, %where_6), kwargs = {})
# %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_7, %div_8), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_8), kwargs = {})
# %clamp_min_9 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_23, 0), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_9, [-1]), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_10, 1), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %sub_5), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_8, %add_8, %where_7), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_8, %div_9), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_9), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_25, 0), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_10, [-1]), kwargs = {})
# %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_11, 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %sub_5), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_9, %add_9, %where_8), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_9, %div_10), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_10), kwargs = {})
# %clamp_min_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_27, 0), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_11, [-1]), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_12, 1), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_28, %sub_5), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_10, %add_10, %where_9), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_10, %div_11), kwargs = {})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_11), kwargs = {})
# %clamp_min_12 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_29, 0), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_12, [-1]), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_13, 1), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_30, %sub_5), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_11, %add_11, %where_10), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_11, %div_12), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_12), kwargs = {})
# %clamp_min_13 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_31, 0), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_13, [-1]), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_14, 1), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %sub_5), kwargs = {})
# %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_12, %add_12, %where_11), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_12, %div_13), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_13), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_33, 0), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_14, [-1]), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_15, 1), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %sub_5), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_13, %add_13, %where_12), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_13, %div_14), kwargs = {})
# %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_14), kwargs = {})
# %clamp_min_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_35, 0), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_15, [-1]), kwargs = {})
# %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_16, 1), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_36, %sub_5), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_14, %add_14, %where_13), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_14, %div_15), kwargs = {})
# %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_15), kwargs = {})
# %clamp_min_16 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_37, 0), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_16, [-1]), kwargs = {})
# %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_17, 1), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %sub_5), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_15, %add_15, %where_14), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_15, %div_16), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_16), kwargs = {})
# %clamp_min_17 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_39, 0), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_17, [-1]), kwargs = {})
# %sub_40 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_18, 1), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_40, %sub_5), kwargs = {})
# %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_16, %add_16, %where_15), kwargs = {})
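# For reference, a hedged eager-mode sketch of the bisection that the fused
# kernel below unrolls (an illustrative reimplementation, not the entmax
# library code). tau is bracketed in [max(x) - 1, max(x) - 1/d] and the
# interval is halved each iteration; tau_lo moves to the midpoint whenever
# f(midpoint) has the same sign as f(tau_lo), where
# f(tau) = sum(clamp(x - tau, 0)) - 1.
def _sparsemax_bisect_sketch(x, n_iter=50, dim=-1):
    d = x.size(dim)
    max_val = x.max(dim=dim, keepdim=True).values
    tau_lo = max_val - 1.0
    tau_hi = max_val - 1.0 / d
    f_lo = torch.clamp(x - tau_lo, min=0).sum(dim=dim, keepdim=True) - 1
    dm = tau_hi - tau_lo
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        f_m = torch.clamp(x - tau_m, min=0).sum(dim=dim, keepdim=True) - 1
        tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    return torch.clamp(x - tau_lo, min=0)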
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr8'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0(in_out_ptr8, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp9 = 0.25
tmp10 = tmp6 - tmp9
tmp11 = tmp10 - tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp8 + tmp13
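    # First bisection iteration: tau_m = tau_lo + dm/2; evaluate
    # f(tau_m) = sum(clamp(x - tau_m, 0)) - 1 over the 4 logits.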
tmp15 = tmp0 - tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp1 - tmp14
tmp19 = triton_helpers.maximum(tmp18, tmp16)
tmp20 = tmp17 + tmp19
tmp21 = tmp3 - tmp14
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = tmp20 + tmp22
tmp24 = tmp5 - tmp14
tmp25 = triton_helpers.maximum(tmp24, tmp16)
tmp26 = tmp23 + tmp25
tmp27 = tmp26 - tmp7
tmp28 = tmp0 - tmp8
tmp29 = triton_helpers.maximum(tmp28, tmp16)
tmp30 = tmp1 - tmp8
tmp31 = triton_helpers.maximum(tmp30, tmp16)
tmp32 = tmp29 + tmp31
tmp33 = tmp3 - tmp8
tmp34 = triton_helpers.maximum(tmp33, tmp16)
tmp35 = tmp32 + tmp34
tmp36 = tmp5 - tmp8
tmp37 = triton_helpers.maximum(tmp36, tmp16)
tmp38 = tmp35 + tmp37
tmp39 = tmp38 - tmp7
tmp40 = tmp27 * tmp39
tmp41 = tmp40 >= tmp16
tmp42 = tl.where(tmp41, tmp14, tmp8)
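    # tau_lo moves to tau_m when f(tau_m) and f(tau_lo) share a sign; the
    # halve/evaluate/where pattern below is this step unrolled 16 more times.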
tmp43 = tmp13 * tmp12
tmp44 = tmp42 + tmp43
tmp45 = tmp0 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp16)
tmp47 = tmp1 - tmp44
tmp48 = triton_helpers.maximum(tmp47, tmp16)
tmp49 = tmp46 + tmp48
tmp50 = tmp3 - tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp16)
tmp52 = tmp49 + tmp51
tmp53 = tmp5 - tmp44
tmp54 = triton_helpers.maximum(tmp53, tmp16)
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp7
tmp57 = tmp56 * tmp39
tmp58 = tmp57 >= tmp16
tmp59 = tl.where(tmp58, tmp44, tmp42)
tmp60 = tmp43 * tmp12
tmp61 = tmp59 + tmp60
tmp62 = tmp0 - tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp16)
tmp64 = tmp1 - tmp61
tmp65 = triton_helpers.maximum(tmp64, tmp16)
tmp66 = tmp63 + tmp65
tmp67 = tmp3 - tmp61
tmp68 = triton_helpers.maximum(tmp67, tmp16)
tmp69 = tmp66 + tmp68
tmp70 = tmp5 - tmp61
tmp71 = triton_helpers.maximum(tmp70, tmp16)
tmp72 = tmp69 + tmp71
tmp73 = tmp72 - tmp7
tmp74 = tmp73 * tmp39
tmp75 = tmp74 >= tmp16
tmp76 = tl.where(tmp75, tmp61, tmp59)
tmp77 = tmp60 * tmp12
tmp78 = tmp76 + tmp77
tmp79 = tmp0 - tmp78
tmp80 = triton_helpers.maximum(tmp79, tmp16)
tmp81 = tmp1 - tmp78
tmp82 = triton_helpers.maximum(tmp81, tmp16)
tmp83 = tmp80 + tmp82
tmp84 = tmp3 - tmp78
tmp85 = triton_helpers.maximum(tmp84, tmp16)
tmp86 = tmp83 + tmp85
tmp87 = tmp5 - tmp78
tmp88 = triton_helpers.maximum(tmp87, tmp16)
tmp89 = tmp86 + tmp88
tmp90 = tmp89 - tmp7
tmp91 = tmp90 * tmp39
tmp92 = tmp91 >= tmp16
tmp93 = tl.where(tmp92, tmp78, tmp76)
tmp94 = tmp77 * tmp12
tmp95 = tmp93 + tmp94
tmp96 = tmp0 - tmp95
tmp97 = triton_helpers.maximum(tmp96, tmp16)
tmp98 = tmp1 - tmp95
tmp99 = triton_helpers.maximum(tmp98, tmp16)
tmp100 = tmp97 + tmp99
tmp101 = tmp3 - tmp95
tmp102 = triton_helpers.maximum(tmp101, tmp16)
tmp103 = tmp100 + tmp102
tmp104 = tmp5 - tmp95
tmp105 = triton_helpers.maximum(tmp104, tmp16)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 - tmp7
tmp108 = tmp107 * tmp39
tmp109 = tmp108 >= tmp16
tmp110 = tl.where(tmp109, tmp95, tmp93)
tmp111 = tmp94 * tmp12
tmp112 = tmp110 + tmp111
tmp113 = tmp0 - tmp112
tmp114 = triton_helpers.maximum(tmp113, tmp16)
tmp115 = tmp1 - tmp112
tmp116 = triton_helpers.maximum(tmp115, tmp16)
tmp117 = tmp114 + tmp116
tmp118 = tmp3 - tmp112
tmp119 = triton_helpers.maximum(tmp118, tmp16)
tmp120 = tmp117 + tmp119
tmp121 = tmp5 - tmp112
tmp122 = triton_helpers.maximum(tmp121, tmp16)
tmp123 = tmp120 + tmp122
tmp124 = tmp123 - tmp7
tmp125 = tmp124 * tmp39
tmp126 = tmp125 >= tmp16
tmp127 = tl.where(tmp126, tmp112, tmp110)
tmp128 = tmp111 * tmp12
tmp129 = tmp127 + tmp128
tmp130 = tmp0 - tmp129
tmp131 = triton_helpers.maximum(tmp130, tmp16)
tmp132 = tmp1 - tmp129
tmp133 = triton_helpers.maximum(tmp132, tmp16)
tmp134 = tmp131 + tmp133
tmp135 = tmp3 - tmp129
tmp136 = triton_helpers.maximum(tmp135, tmp16)
tmp137 = tmp134 + tmp136
tmp138 = tmp5 - tmp129
tmp139 = triton_helpers.maximum(tmp138, tmp16)
tmp140 = tmp137 + tmp139
tmp141 = tmp140 - tmp7
tmp142 = tmp141 * tmp39
tmp143 = tmp142 >= tmp16
tmp144 = tl.where(tmp143, tmp129, tmp127)
tmp145 = tmp128 * tmp12
tmp146 = tmp144 + tmp145
tmp147 = tmp0 - tmp146
tmp148 = triton_helpers.maximum(tmp147, tmp16)
tmp149 = tmp1 - tmp146
tmp150 = triton_helpers.maximum(tmp149, tmp16)
tmp151 = tmp148 + tmp150
tmp152 = tmp3 - tmp146
tmp153 = triton_helpers.maximum(tmp152, tmp16)
tmp154 = tmp151 + tmp153
tmp155 = tmp5 - tmp146
tmp156 = triton_helpers.maximum(tmp155, tmp16)
tmp157 = tmp154 + tmp156
tmp158 = tmp157 - tmp7
tmp159 = tmp158 * tmp39
tmp160 = tmp159 >= tmp16
tmp161 = tl.where(tmp160, tmp146, tmp144)
tmp162 = tmp145 * tmp12
tmp163 = tmp161 + tmp162
tmp164 = tmp0 - tmp163
tmp165 = triton_helpers.maximum(tmp164, tmp16)
tmp166 = tmp1 - tmp163
tmp167 = triton_helpers.maximum(tmp166, tmp16)
tmp168 = tmp165 + tmp167
tmp169 = tmp3 - tmp163
tmp170 = triton_helpers.maximum(tmp169, tmp16)
tmp171 = tmp168 + tmp170
tmp172 = tmp5 - tmp163
tmp173 = triton_helpers.maximum(tmp172, tmp16)
tmp174 = tmp171 + tmp173
tmp175 = tmp174 - tmp7
tmp176 = tmp175 * tmp39
tmp177 = tmp176 >= tmp16
tmp178 = tl.where(tmp177, tmp163, tmp161)
tmp179 = tmp162 * tmp12
tmp180 = tmp178 + tmp179
tmp181 = tmp0 - tmp180
tmp182 = triton_helpers.maximum(tmp181, tmp16)
tmp183 = tmp1 - tmp180
tmp184 = triton_helpers.maximum(tmp183, tmp16)
tmp185 = tmp182 + tmp184
tmp186 = tmp3 - tmp180
tmp187 = triton_helpers.maximum(tmp186, tmp16)
tmp188 = tmp185 + tmp187
tmp189 = tmp5 - tmp180
tmp190 = triton_helpers.maximum(tmp189, tmp16)
tmp191 = tmp188 + tmp190
tmp192 = tmp191 - tmp7
tmp193 = tmp192 * tmp39
tmp194 = tmp193 >= tmp16
tmp195 = tl.where(tmp194, tmp180, tmp178)
tmp196 = tmp179 * tmp12
tmp197 = tmp195 + tmp196
tmp198 = tmp0 - tmp197
tmp199 = triton_helpers.maximum(tmp198, tmp16)
tmp200 = tmp1 - tmp197
tmp201 = triton_helpers.maximum(tmp200, tmp16)
tmp202 = tmp199 + tmp201
tmp203 = tmp3 - tmp197
tmp204 = triton_helpers.maximum(tmp203, tmp16)
tmp205 = tmp202 + tmp204
tmp206 = tmp5 - tmp197
tmp207 = triton_helpers.maximum(tmp206, tmp16)
tmp208 = tmp205 + tmp207
tmp209 = tmp208 - tmp7
tmp210 = tmp209 * tmp39
tmp211 = tmp210 >= tmp16
tmp212 = tl.where(tmp211, tmp197, tmp195)
tmp213 = tmp196 * tmp12
tmp214 = tmp212 + tmp213
tmp215 = tmp0 - tmp214
tmp216 = triton_helpers.maximum(tmp215, tmp16)
tmp217 = tmp1 - tmp214
tmp218 = triton_helpers.maximum(tmp217, tmp16)
tmp219 = tmp216 + tmp218
tmp220 = tmp3 - tmp214
tmp221 = triton_helpers.maximum(tmp220, tmp16)
tmp222 = tmp219 + tmp221
tmp223 = tmp5 - tmp214
tmp224 = triton_helpers.maximum(tmp223, tmp16)
tmp225 = tmp222 + tmp224
tmp226 = tmp225 - tmp7
tmp227 = tmp226 * tmp39
tmp228 = tmp227 >= tmp16
tmp229 = tl.where(tmp228, tmp214, tmp212)
tmp230 = tmp213 * tmp12
tmp231 = tmp229 + tmp230
tmp232 = tmp0 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp16)
tmp234 = tmp1 - tmp231
tmp235 = triton_helpers.maximum(tmp234, tmp16)
tmp236 = tmp233 + tmp235
tmp237 = tmp3 - tmp231
tmp238 = triton_helpers.maximum(tmp237, tmp16)
tmp239 = tmp236 + tmp238
tmp240 = tmp5 - tmp231
tmp241 = triton_helpers.maximum(tmp240, tmp16)
tmp242 = tmp239 + tmp241
tmp243 = tmp242 - tmp7
tmp244 = tmp243 * tmp39
tmp245 = tmp244 >= tmp16
tmp246 = tl.where(tmp245, tmp231, tmp229)
tmp247 = tmp230 * tmp12
tmp248 = tmp246 + tmp247
tmp249 = tmp0 - tmp248
tmp250 = triton_helpers.maximum(tmp249, tmp16)
tmp251 = tmp1 - tmp248
tmp252 = triton_helpers.maximum(tmp251, tmp16)
tmp253 = tmp250 + tmp252
tmp254 = tmp3 - tmp248
tmp255 = triton_helpers.maximum(tmp254, tmp16)
tmp256 = tmp253 + tmp255
tmp257 = tmp5 - tmp248
tmp258 = triton_helpers.maximum(tmp257, tmp16)
tmp259 = tmp256 + tmp258
tmp260 = tmp259 - tmp7
tmp261 = tmp260 * tmp39
tmp262 = tmp261 >= tmp16
tmp263 = tl.where(tmp262, tmp248, tmp246)
tmp264 = tmp247 * tmp12
tmp265 = tmp263 + tmp264
tmp266 = tmp0 - tmp265
tmp267 = triton_helpers.maximum(tmp266, tmp16)
tmp268 = tmp1 - tmp265
tmp269 = triton_helpers.maximum(tmp268, tmp16)
tmp270 = tmp267 + tmp269
tmp271 = tmp3 - tmp265
tmp272 = triton_helpers.maximum(tmp271, tmp16)
tmp273 = tmp270 + tmp272
tmp274 = tmp5 - tmp265
tmp275 = triton_helpers.maximum(tmp274, tmp16)
tmp276 = tmp273 + tmp275
tmp277 = tmp276 - tmp7
tmp278 = tmp277 * tmp39
tmp279 = tmp278 >= tmp16
tmp280 = tl.where(tmp279, tmp265, tmp263)
tmp281 = tmp264 * tmp12
tmp282 = tmp280 + tmp281
tmp283 = tmp0 - tmp282
tmp284 = triton_helpers.maximum(tmp283, tmp16)
tmp285 = tmp1 - tmp282
tmp286 = triton_helpers.maximum(tmp285, tmp16)
tmp287 = tmp284 + tmp286
tmp288 = tmp3 - tmp282
tmp289 = triton_helpers.maximum(tmp288, tmp16)
tmp290 = tmp287 + tmp289
tmp291 = tmp5 - tmp282
tmp292 = triton_helpers.maximum(tmp291, tmp16)
tmp293 = tmp290 + tmp292
tmp294 = tmp293 - tmp7
tmp295 = tmp294 * tmp39
tmp296 = tmp295 >= tmp16
tmp297 = tl.where(tmp296, tmp282, tmp280)
tmp298 = tmp281 * tmp12
tmp299 = tmp297 + tmp298
tmp300 = tmp0 - tmp299
tmp301 = triton_helpers.maximum(tmp300, tmp16)
tmp302 = tmp1 - tmp299
tmp303 = triton_helpers.maximum(tmp302, tmp16)
tmp304 = tmp301 + tmp303
tmp305 = tmp3 - tmp299
tmp306 = triton_helpers.maximum(tmp305, tmp16)
tmp307 = tmp304 + tmp306
tmp308 = tmp5 - tmp299
tmp309 = triton_helpers.maximum(tmp308, tmp16)
tmp310 = tmp307 + tmp309
tmp311 = tmp310 - tmp7
tmp312 = tmp311 * tmp39
tmp313 = tmp312 >= tmp16
tmp314 = tl.where(tmp313, tmp299, tmp297)
tl.store(in_out_ptr8 + (x0), tmp314, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/av/cavd5cj3tg3vokgvnwac6qqbcheawux6j46atz5eq3ovasf7jkxk.py
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, sub_4, clamp, sum_1, f_lo, dm_2, dm_3, dm_4, dm_5, dm_6, dm_7, dm_8, dm_9, dm_10, dm_11, dm_12, dm_13, dm_14, dm_15, dm_16, dm_17, dm_18, tau_m_17, sub_41, p_m_17, sum_19, f_m_17, mul_19, tau_lo_18, dm_19, tau_m_18, sub_43, p_m_18, sum_20, f_m_18, mul_20, tau_lo_19, dm_20, tau_m_19, sub_45, p_m_19, sum_21, f_m_19, tau_lo_20, dm_21, tau_m_20, sub_47, p_m_20, sum_22, f_m_20, mul_22, tau_lo_21, dm_22, tau_m_21, sub_49, p_m_21, sum_23, f_m_21, mul_23, tau_lo_22, dm_23, tau_m_22, sub_51, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_53, p_m_23, sum_25, f_m_23, mul_25, tau_lo_24, dm_25, tau_m_24, sub_55, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_57, p_m_25, sum_27, f_m_25, mul_27, tau_lo_26, dm_27, tau_m_26, sub_59, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_61, p_m_27, sum_29, f_m_27, tau_lo_28, dm_29, tau_m_28, sub_63, p_m_28, sum_30, tau_lo_29, dm_30, tau_m_29, sub_65, p_m_29, sum_31, tau_lo_30, dm_31, tau_m_30, sub_67, p_m_30, sum_32, tau_lo_31, dm_32, tau_m_31, sub_69, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_71, p_m_32, sum_34, tau_lo_33, dm_34, tau_m_33, sub_73, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_75, p_m_34, sum_36, tau_lo_35, dm_36, tau_m_35, sub_77, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_79, p_m_36, sum_38, tau_lo_37, dm_38, tau_m_37, sub_81, p_m_37, sum_39, tau_lo_38, dm_39, tau_m_38, sub_83, p_m_38, sum_40, tau_lo_39, dm_40, tau_m_39, sub_85, p_m_39, sum_41, tau_lo_40], Original ATen: [aten.max, aten.sub, aten.div, aten.clamp, aten.sum, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm => sub_6
# dm_1 => div
# dm_10 => div_9
# dm_11 => div_10
# dm_12 => div_11
# dm_13 => div_12
# dm_14 => div_13
# dm_15 => div_14
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_2 => div_1
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_3 => div_2
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_4 => div_3
# dm_40 => div_39
# dm_5 => div_4
# dm_6 => div_5
# dm_7 => div_6
# dm_8 => div_7
# dm_9 => div_8
# f_lo => sub_5
# f_m_17 => sub_42
# f_m_18 => sub_44
# f_m_19 => sub_46
# f_m_20 => sub_48
# f_m_21 => sub_50
# f_m_23 => sub_54
# f_m_25 => sub_58
# f_m_27 => sub_62
# max_1 => max_1
# mul_19 => mul_19
# mul_20 => mul_20
# mul_22 => mul_22
# mul_23 => mul_23
# mul_25 => mul_25
# mul_27 => mul_27
# p_m_17 => clamp_min_18
# p_m_18 => clamp_min_19
# p_m_19 => clamp_min_20
# p_m_20 => clamp_min_21
# p_m_21 => clamp_min_22
# p_m_22 => clamp_min_23
# p_m_23 => clamp_min_24
# p_m_24 => clamp_min_25
# p_m_25 => clamp_min_26
# p_m_26 => clamp_min_27
# p_m_27 => clamp_min_28
# p_m_28 => clamp_min_29
# p_m_29 => clamp_min_30
# p_m_30 => clamp_min_31
# p_m_31 => clamp_min_32
# p_m_32 => clamp_min_33
# p_m_33 => clamp_min_34
# p_m_34 => clamp_min_35
# p_m_35 => clamp_min_36
# p_m_36 => clamp_min_37
# p_m_37 => clamp_min_38
# p_m_38 => clamp_min_39
# p_m_39 => clamp_min_40
# sub_4 => sub_4
# sub_41 => sub_41
# sub_43 => sub_43
# sub_45 => sub_45
# sub_47 => sub_47
# sub_49 => sub_49
# sub_51 => sub_51
# sub_53 => sub_53
# sub_55 => sub_55
# sub_57 => sub_57
# sub_59 => sub_59
# sub_61 => sub_61
# sub_63 => sub_63
# sub_65 => sub_65
# sub_67 => sub_67
# sub_69 => sub_69
# sub_71 => sub_71
# sub_73 => sub_73
# sub_75 => sub_75
# sub_77 => sub_77
# sub_79 => sub_79
# sub_81 => sub_81
# sub_83 => sub_83
# sub_85 => sub_85
# sum_1 => sum_1
# sum_19 => sum_19
# sum_20 => sum_20
# sum_21 => sum_21
# sum_22 => sum_22
# sum_23 => sum_23
# sum_24 => sum_24
# sum_25 => sum_25
# sum_26 => sum_26
# sum_27 => sum_27
# sum_28 => sum_28
# sum_29 => sum_29
# sum_30 => sum_30
# sum_31 => sum_31
# sum_32 => sum_32
# sum_33 => sum_33
# sum_34 => sum_34
# sum_35 => sum_35
# sum_36 => sum_36
# sum_37 => sum_37
# sum_38 => sum_38
# sum_39 => sum_39
# sum_40 => sum_40
# sum_41 => sum_41
# tau_hi => sub_3
# tau_lo => sub_2
# tau_lo_18 => where_17
# tau_lo_19 => where_18
# tau_lo_20 => where_19
# tau_lo_21 => where_20
# tau_lo_22 => where_21
# tau_lo_23 => where_22
# tau_lo_24 => where_23
# tau_lo_25 => where_24
# tau_lo_26 => where_25
# tau_lo_27 => where_26
# tau_lo_28 => where_27
# tau_lo_29 => where_28
# tau_lo_30 => where_29
# tau_lo_31 => where_30
# tau_lo_32 => where_31
# tau_lo_33 => where_32
# tau_lo_34 => where_33
# tau_lo_35 => where_34
# tau_lo_36 => where_35
# tau_lo_37 => where_36
# tau_lo_38 => where_37
# tau_lo_39 => where_38
# tau_lo_40 => where_39
# tau_m_17 => add_17
# tau_m_18 => add_18
# tau_m_19 => add_19
# tau_m_20 => add_20
# tau_m_21 => add_21
# tau_m_22 => add_22
# tau_m_23 => add_23
# tau_m_24 => add_24
# tau_m_25 => add_25
# tau_m_26 => add_26
# tau_m_27 => add_27
# tau_m_28 => add_28
# tau_m_29 => add_29
# tau_m_30 => add_30
# tau_m_31 => add_31
# tau_m_32 => add_32
# tau_m_33 => add_33
# tau_m_34 => add_34
# tau_m_35 => add_35
# tau_m_36 => add_36
# tau_m_37 => add_37
# tau_m_38 => add_38
# tau_m_39 => add_39
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 0.25), kwargs = {})
# %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %sub_2), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, 2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_4, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_5 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {})
# %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_16, %div_17), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_17), kwargs = {})
# %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_41, 0), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_18, [-1]), kwargs = {})
# %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_19, 1), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %sub_5), kwargs = {})
# %where_17 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_17, %add_17, %where_16), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_17, %div_18), kwargs = {})
# %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_18), kwargs = {})
# %clamp_min_19 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_43, 0), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_19, [-1]), kwargs = {})
# %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_20, 1), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %sub_5), kwargs = {})
# %where_18 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_18, %add_18, %where_17), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_18, %div_19), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_19), kwargs = {})
# %clamp_min_20 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_45, 0), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_20, [-1]), kwargs = {})
# %sub_46 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_21, 1), kwargs = {})
# %where_19 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_19, %add_19, %where_18), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_19, %div_20), kwargs = {})
# %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_20), kwargs = {})
# %clamp_min_21 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_47, 0), kwargs = {})
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_21, [-1]), kwargs = {})
# %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_22, 1), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_48, %sub_5), kwargs = {})
# %where_20 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_20, %add_20, %where_19), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %add_21 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_20, %div_21), kwargs = {})
# %sub_49 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_21), kwargs = {})
# %clamp_min_22 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_49, 0), kwargs = {})
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_22, [-1]), kwargs = {})
# %sub_50 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_23, 1), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_50, %sub_5), kwargs = {})
# %where_21 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_21, %add_21, %where_20), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %add_22 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_21, %div_22), kwargs = {})
# %sub_51 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_22), kwargs = {})
# %clamp_min_23 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_51, 0), kwargs = {})
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_23, [-1]), kwargs = {})
# %where_22 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_22, %add_22, %where_21), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %add_23 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_22, %div_23), kwargs = {})
# %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_23), kwargs = {})
# %clamp_min_24 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_53, 0), kwargs = {})
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_24, [-1]), kwargs = {})
# %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_25, 1), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_54, %sub_5), kwargs = {})
# %where_23 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_23, %add_23, %where_22), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_23, %div_24), kwargs = {})
# %sub_55 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_24), kwargs = {})
# %clamp_min_25 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_55, 0), kwargs = {})
# %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_25, [-1]), kwargs = {})
# %where_24 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_24, %add_24, %where_23), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {})
# %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_25), kwargs = {})
# %clamp_min_26 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_57, 0), kwargs = {})
# %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_26, [-1]), kwargs = {})
# %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_27, 1), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_58, %sub_5), kwargs = {})
# %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {})
# %sub_59 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_26), kwargs = {})
# %clamp_min_27 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_59, 0), kwargs = {})
# %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_27, [-1]), kwargs = {})
# %where_26 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_26, %add_26, %where_25), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {})
# %sub_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_27), kwargs = {})
# %clamp_min_28 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_61, 0), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_28, [-1]), kwargs = {})
# %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_29, 1), kwargs = {})
# %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {})
# %sub_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_28), kwargs = {})
# %clamp_min_29 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_63, 0), kwargs = {})
# %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_29, [-1]), kwargs = {})
# %where_28 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_28, %add_28, %where_27), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {})
# %sub_65 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_29), kwargs = {})
# %clamp_min_30 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_65, 0), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_30, [-1]), kwargs = {})
# %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {})
# %sub_67 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_30), kwargs = {})
# %clamp_min_31 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_67, 0), kwargs = {})
# %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_31, [-1]), kwargs = {})
# %where_30 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_30, %add_30, %where_29), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {})
# %sub_69 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_31), kwargs = {})
# %clamp_min_32 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_69, 0), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_32, [-1]), kwargs = {})
# %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_31, %add_31, %where_30), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_31, %div_32), kwargs = {})
# %sub_71 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_32), kwargs = {})
# %clamp_min_33 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_71, 0), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_33, [-1]), kwargs = {})
# %where_32 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_32, %add_32, %where_31), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %add_33 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_32, %div_33), kwargs = {})
# %sub_73 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_33), kwargs = {})
# %clamp_min_34 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_73, 0), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_34, [-1]), kwargs = {})
# %where_33 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_33, %add_33, %where_32), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {})
# %sub_75 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_34), kwargs = {})
# %clamp_min_35 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_75, 0), kwargs = {})
# %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_35, [-1]), kwargs = {})
# %where_34 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_34, %add_34, %where_33), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {})
# %sub_77 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_35), kwargs = {})
# %clamp_min_36 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_77, 0), kwargs = {})
# %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_36, [-1]), kwargs = {})
# %where_35 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_35, %add_35, %where_34), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {})
# %sub_79 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_36), kwargs = {})
# %clamp_min_37 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_79, 0), kwargs = {})
# %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_37, [-1]), kwargs = {})
# %where_36 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_36, %add_36, %where_35), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {})
# %sub_81 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_37), kwargs = {})
# %clamp_min_38 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_81, 0), kwargs = {})
# %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_38, [-1]), kwargs = {})
# %where_37 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_37, %add_37, %where_36), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {})
# %sub_83 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_38), kwargs = {})
# %clamp_min_39 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_83, 0), kwargs = {})
# %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_39, [-1]), kwargs = {})
# %where_38 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_38, %add_38, %where_37), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
# %sub_85 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_39), kwargs = {})
# %clamp_min_40 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_85, 0), kwargs = {})
# %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_40, [-1]), kwargs = {})
# %where_39 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_39, %add_39, %where_38), kwargs = {})
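# [Editorial note] The fragment list above is one long unrolled loop: each
# div_k / add_k / where_k triple is a single bisection step on the threshold
# tau (halve the interval width dm, probe tau + dm, keep the probe when the
# sign test passes). The kernel below fuses all of these steps for rows of
# length 4 into one pointwise program, carrying tau in registers instead of
# materializing each intermediate tensor.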
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1', 'mutated_arg_names': ['in_out_ptr16'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1(in_out_ptr16, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.25
tmp8 = tmp6 - tmp7
tmp9 = 1.0
tmp10 = tmp6 - tmp9
tmp11 = tmp8 - tmp10
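    # tmp8 = tau_hi = max - 1/d (d = 4) and tmp10 = tau_lo = max - 1 bracket
    # the root of f(tau) = sum(clamp(x - tau, 0)) - 1; tmp11 = dm_0 is the
    # initial interval width.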
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp12
tmp15 = tmp14 * tmp12
tmp16 = tmp15 * tmp12
tmp17 = tmp16 * tmp12
tmp18 = tmp17 * tmp12
tmp19 = tmp18 * tmp12
tmp20 = tmp19 * tmp12
tmp21 = tmp20 * tmp12
tmp22 = tmp21 * tmp12
tmp23 = tmp22 * tmp12
tmp24 = tmp23 * tmp12
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp12
tmp27 = tmp26 * tmp12
tmp28 = tmp27 * tmp12
tmp29 = tmp28 * tmp12
tmp30 = tmp29 * tmp12
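    # tmp13..tmp30 halve dm_0 eighteen times, so tmp30 = dm_0 / 2**18; it is
    # written to out_ptr0 at the end so the following kernels can keep
    # halving it.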
tmp32 = tmp31 + tmp30
tmp33 = tmp0 - tmp32
tmp34 = 0.0
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp1 - tmp32
tmp37 = triton_helpers.maximum(tmp36, tmp34)
tmp38 = tmp35 + tmp37
tmp39 = tmp3 - tmp32
tmp40 = triton_helpers.maximum(tmp39, tmp34)
tmp41 = tmp38 + tmp40
tmp42 = tmp5 - tmp32
tmp43 = triton_helpers.maximum(tmp42, tmp34)
tmp44 = tmp41 + tmp43
tmp45 = tmp44 - tmp9
tmp46 = tmp0 - tmp10
tmp47 = triton_helpers.maximum(tmp46, tmp34)
tmp48 = tmp1 - tmp10
tmp49 = triton_helpers.maximum(tmp48, tmp34)
tmp50 = tmp47 + tmp49
tmp51 = tmp3 - tmp10
tmp52 = triton_helpers.maximum(tmp51, tmp34)
tmp53 = tmp50 + tmp52
tmp54 = tmp5 - tmp10
tmp55 = triton_helpers.maximum(tmp54, tmp34)
tmp56 = tmp53 + tmp55
tmp57 = tmp56 - tmp9
tmp58 = tmp45 * tmp57
tmp59 = tmp58 >= tmp34
tmp60 = tl.where(tmp59, tmp32, tmp31)
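    # One unrolled bisection step: tmp45 = f(tau_m), tmp57 = f(tau_lo_0), and
    # tau advances to the midpoint tmp32 only when the two signs agree. The
    # same pattern repeats below with dm halved each time.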
tmp61 = tmp30 * tmp12
tmp62 = tmp60 + tmp61
tmp63 = tmp0 - tmp62
tmp64 = triton_helpers.maximum(tmp63, tmp34)
tmp65 = tmp1 - tmp62
tmp66 = triton_helpers.maximum(tmp65, tmp34)
tmp67 = tmp64 + tmp66
tmp68 = tmp3 - tmp62
tmp69 = triton_helpers.maximum(tmp68, tmp34)
tmp70 = tmp67 + tmp69
tmp71 = tmp5 - tmp62
tmp72 = triton_helpers.maximum(tmp71, tmp34)
tmp73 = tmp70 + tmp72
tmp74 = tmp73 - tmp9
tmp75 = tmp74 * tmp57
tmp76 = tmp75 >= tmp34
tmp77 = tl.where(tmp76, tmp62, tmp60)
tmp78 = tmp61 * tmp12
tmp79 = tmp77 + tmp78
tmp80 = tmp0 - tmp79
tmp81 = triton_helpers.maximum(tmp80, tmp34)
tmp82 = tmp1 - tmp79
tmp83 = triton_helpers.maximum(tmp82, tmp34)
tmp84 = tmp81 + tmp83
tmp85 = tmp3 - tmp79
tmp86 = triton_helpers.maximum(tmp85, tmp34)
tmp87 = tmp84 + tmp86
tmp88 = tmp5 - tmp79
tmp89 = triton_helpers.maximum(tmp88, tmp34)
tmp90 = tmp87 + tmp89
tmp91 = tmp90 - tmp9
tmp92 = tmp91 * tmp57
tmp93 = tmp92 >= tmp34
tmp94 = tl.where(tmp93, tmp79, tmp77)
tmp95 = tmp78 * tmp12
tmp96 = tmp94 + tmp95
tmp97 = tmp0 - tmp96
tmp98 = triton_helpers.maximum(tmp97, tmp34)
tmp99 = tmp1 - tmp96
tmp100 = triton_helpers.maximum(tmp99, tmp34)
tmp101 = tmp98 + tmp100
tmp102 = tmp3 - tmp96
tmp103 = triton_helpers.maximum(tmp102, tmp34)
tmp104 = tmp101 + tmp103
tmp105 = tmp5 - tmp96
tmp106 = triton_helpers.maximum(tmp105, tmp34)
tmp107 = tmp104 + tmp106
tmp108 = tmp107 - tmp9
tmp109 = tmp108 * tmp57
tmp110 = tmp109 >= tmp34
tmp111 = tl.where(tmp110, tmp96, tmp94)
tmp112 = tmp95 * tmp12
tmp113 = tmp111 + tmp112
tmp114 = tmp0 - tmp113
tmp115 = triton_helpers.maximum(tmp114, tmp34)
tmp116 = tmp1 - tmp113
tmp117 = triton_helpers.maximum(tmp116, tmp34)
tmp118 = tmp115 + tmp117
tmp119 = tmp3 - tmp113
tmp120 = triton_helpers.maximum(tmp119, tmp34)
tmp121 = tmp118 + tmp120
tmp122 = tmp5 - tmp113
tmp123 = triton_helpers.maximum(tmp122, tmp34)
tmp124 = tmp121 + tmp123
tmp125 = tmp124 - tmp9
tmp126 = tmp125 * tmp57
tmp127 = tmp126 >= tmp34
tmp128 = tl.where(tmp127, tmp113, tmp111)
tmp129 = tmp112 * tmp12
tmp130 = tmp128 + tmp129
tmp131 = tmp0 - tmp130
tmp132 = triton_helpers.maximum(tmp131, tmp34)
tmp133 = tmp1 - tmp130
tmp134 = triton_helpers.maximum(tmp133, tmp34)
tmp135 = tmp132 + tmp134
tmp136 = tmp3 - tmp130
tmp137 = triton_helpers.maximum(tmp136, tmp34)
tmp138 = tmp135 + tmp137
tmp139 = tmp5 - tmp130
tmp140 = triton_helpers.maximum(tmp139, tmp34)
tmp141 = tmp138 + tmp140
tmp142 = tmp141 - tmp9
tmp143 = tmp142 * tmp57
tmp144 = tmp143 >= tmp34
tmp145 = tl.where(tmp144, tmp130, tmp128)
tmp146 = tmp129 * tmp12
tmp147 = tmp145 + tmp146
tmp148 = tmp0 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp34)
tmp150 = tmp1 - tmp147
tmp151 = triton_helpers.maximum(tmp150, tmp34)
tmp152 = tmp149 + tmp151
tmp153 = tmp3 - tmp147
tmp154 = triton_helpers.maximum(tmp153, tmp34)
tmp155 = tmp152 + tmp154
tmp156 = tmp5 - tmp147
tmp157 = triton_helpers.maximum(tmp156, tmp34)
tmp158 = tmp155 + tmp157
tmp159 = tmp158 - tmp9
tmp160 = tmp159 * tmp57
tmp161 = tmp160 >= tmp34
tmp162 = tl.where(tmp161, tmp147, tmp145)
tmp163 = tmp146 * tmp12
tmp164 = tmp162 + tmp163
tmp165 = tmp0 - tmp164
tmp166 = triton_helpers.maximum(tmp165, tmp34)
tmp167 = tmp1 - tmp164
tmp168 = triton_helpers.maximum(tmp167, tmp34)
tmp169 = tmp166 + tmp168
tmp170 = tmp3 - tmp164
tmp171 = triton_helpers.maximum(tmp170, tmp34)
tmp172 = tmp169 + tmp171
tmp173 = tmp5 - tmp164
tmp174 = triton_helpers.maximum(tmp173, tmp34)
tmp175 = tmp172 + tmp174
tmp176 = tmp175 - tmp9
tmp177 = tmp176 * tmp57
tmp178 = tmp177 >= tmp34
tmp179 = tl.where(tmp178, tmp164, tmp162)
tmp180 = tmp163 * tmp12
tmp181 = tmp179 + tmp180
tmp182 = tmp0 - tmp181
tmp183 = triton_helpers.maximum(tmp182, tmp34)
tmp184 = tmp1 - tmp181
tmp185 = triton_helpers.maximum(tmp184, tmp34)
tmp186 = tmp183 + tmp185
tmp187 = tmp3 - tmp181
tmp188 = triton_helpers.maximum(tmp187, tmp34)
tmp189 = tmp186 + tmp188
tmp190 = tmp5 - tmp181
tmp191 = triton_helpers.maximum(tmp190, tmp34)
tmp192 = tmp189 + tmp191
tmp193 = tmp192 - tmp9
tmp194 = tmp193 * tmp57
tmp195 = tmp194 >= tmp34
tmp196 = tl.where(tmp195, tmp181, tmp179)
tmp197 = tmp180 * tmp12
tmp198 = tmp196 + tmp197
tmp199 = tmp0 - tmp198
tmp200 = triton_helpers.maximum(tmp199, tmp34)
tmp201 = tmp1 - tmp198
tmp202 = triton_helpers.maximum(tmp201, tmp34)
tmp203 = tmp200 + tmp202
tmp204 = tmp3 - tmp198
tmp205 = triton_helpers.maximum(tmp204, tmp34)
tmp206 = tmp203 + tmp205
tmp207 = tmp5 - tmp198
tmp208 = triton_helpers.maximum(tmp207, tmp34)
tmp209 = tmp206 + tmp208
tmp210 = tmp209 - tmp9
tmp211 = tmp210 * tmp57
tmp212 = tmp211 >= tmp34
tmp213 = tl.where(tmp212, tmp198, tmp196)
tmp214 = tmp197 * tmp12
tmp215 = tmp213 + tmp214
tmp216 = tmp0 - tmp215
tmp217 = triton_helpers.maximum(tmp216, tmp34)
tmp218 = tmp1 - tmp215
tmp219 = triton_helpers.maximum(tmp218, tmp34)
tmp220 = tmp217 + tmp219
tmp221 = tmp3 - tmp215
tmp222 = triton_helpers.maximum(tmp221, tmp34)
tmp223 = tmp220 + tmp222
tmp224 = tmp5 - tmp215
tmp225 = triton_helpers.maximum(tmp224, tmp34)
tmp226 = tmp223 + tmp225
tmp227 = tmp226 - tmp9
tmp228 = tmp227 * tmp57
tmp229 = tmp228 >= tmp34
tmp230 = tl.where(tmp229, tmp215, tmp213)
tmp231 = tmp214 * tmp12
tmp232 = tmp230 + tmp231
tmp233 = tmp0 - tmp232
tmp234 = triton_helpers.maximum(tmp233, tmp34)
tmp235 = tmp1 - tmp232
tmp236 = triton_helpers.maximum(tmp235, tmp34)
tmp237 = tmp234 + tmp236
tmp238 = tmp3 - tmp232
tmp239 = triton_helpers.maximum(tmp238, tmp34)
tmp240 = tmp237 + tmp239
tmp241 = tmp5 - tmp232
tmp242 = triton_helpers.maximum(tmp241, tmp34)
tmp243 = tmp240 + tmp242
tmp244 = tmp243 - tmp9
tmp245 = tmp244 * tmp57
tmp246 = tmp245 >= tmp34
tmp247 = tl.where(tmp246, tmp232, tmp230)
tmp248 = tmp231 * tmp12
tmp249 = tmp247 + tmp248
tmp250 = tmp0 - tmp249
tmp251 = triton_helpers.maximum(tmp250, tmp34)
tmp252 = tmp1 - tmp249
tmp253 = triton_helpers.maximum(tmp252, tmp34)
tmp254 = tmp251 + tmp253
tmp255 = tmp3 - tmp249
tmp256 = triton_helpers.maximum(tmp255, tmp34)
tmp257 = tmp254 + tmp256
tmp258 = tmp5 - tmp249
tmp259 = triton_helpers.maximum(tmp258, tmp34)
tmp260 = tmp257 + tmp259
tmp261 = tmp260 - tmp9
tmp262 = tmp261 * tmp57
tmp263 = tmp262 >= tmp34
tmp264 = tl.where(tmp263, tmp249, tmp247)
tmp265 = tmp248 * tmp12
tmp266 = tmp264 + tmp265
tmp267 = tmp0 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp34)
tmp269 = tmp1 - tmp266
tmp270 = triton_helpers.maximum(tmp269, tmp34)
tmp271 = tmp268 + tmp270
tmp272 = tmp3 - tmp266
tmp273 = triton_helpers.maximum(tmp272, tmp34)
tmp274 = tmp271 + tmp273
tmp275 = tmp5 - tmp266
tmp276 = triton_helpers.maximum(tmp275, tmp34)
tmp277 = tmp274 + tmp276
tmp278 = tmp277 - tmp9
tmp279 = tmp278 * tmp57
tmp280 = tmp279 >= tmp34
tmp281 = tl.where(tmp280, tmp266, tmp264)
tmp282 = tmp265 * tmp12
tmp283 = tmp281 + tmp282
tmp284 = tmp0 - tmp283
tmp285 = triton_helpers.maximum(tmp284, tmp34)
tmp286 = tmp1 - tmp283
tmp287 = triton_helpers.maximum(tmp286, tmp34)
tmp288 = tmp285 + tmp287
tmp289 = tmp3 - tmp283
tmp290 = triton_helpers.maximum(tmp289, tmp34)
tmp291 = tmp288 + tmp290
tmp292 = tmp5 - tmp283
tmp293 = triton_helpers.maximum(tmp292, tmp34)
tmp294 = tmp291 + tmp293
tmp295 = tmp294 - tmp9
tmp296 = tmp295 * tmp57
tmp297 = tmp296 >= tmp34
tmp298 = tl.where(tmp297, tmp283, tmp281)
tmp299 = tmp282 * tmp12
tmp300 = tmp298 + tmp299
tmp301 = tmp0 - tmp300
tmp302 = triton_helpers.maximum(tmp301, tmp34)
tmp303 = tmp1 - tmp300
tmp304 = triton_helpers.maximum(tmp303, tmp34)
tmp305 = tmp302 + tmp304
tmp306 = tmp3 - tmp300
tmp307 = triton_helpers.maximum(tmp306, tmp34)
tmp308 = tmp305 + tmp307
tmp309 = tmp5 - tmp300
tmp310 = triton_helpers.maximum(tmp309, tmp34)
tmp311 = tmp308 + tmp310
tmp312 = tmp311 - tmp9
tmp313 = tmp312 * tmp57
tmp314 = tmp313 >= tmp34
tmp315 = tl.where(tmp314, tmp300, tmp298)
tmp316 = tmp299 * tmp12
tmp317 = tmp315 + tmp316
tmp318 = tmp0 - tmp317
tmp319 = triton_helpers.maximum(tmp318, tmp34)
tmp320 = tmp1 - tmp317
tmp321 = triton_helpers.maximum(tmp320, tmp34)
tmp322 = tmp319 + tmp321
tmp323 = tmp3 - tmp317
tmp324 = triton_helpers.maximum(tmp323, tmp34)
tmp325 = tmp322 + tmp324
tmp326 = tmp5 - tmp317
tmp327 = triton_helpers.maximum(tmp326, tmp34)
tmp328 = tmp325 + tmp327
tmp329 = tmp328 - tmp9
tmp330 = tmp329 * tmp57
tmp331 = tmp330 >= tmp34
tmp332 = tl.where(tmp331, tmp317, tmp315)
tmp333 = tmp316 * tmp12
tmp334 = tmp332 + tmp333
tmp335 = tmp0 - tmp334
tmp336 = triton_helpers.maximum(tmp335, tmp34)
tmp337 = tmp1 - tmp334
tmp338 = triton_helpers.maximum(tmp337, tmp34)
tmp339 = tmp336 + tmp338
tmp340 = tmp3 - tmp334
tmp341 = triton_helpers.maximum(tmp340, tmp34)
tmp342 = tmp339 + tmp341
tmp343 = tmp5 - tmp334
tmp344 = triton_helpers.maximum(tmp343, tmp34)
tmp345 = tmp342 + tmp344
tmp346 = tmp345 - tmp9
tmp347 = tmp346 * tmp57
tmp348 = tmp347 >= tmp34
tmp349 = tl.where(tmp348, tmp334, tmp332)
tmp350 = tmp333 * tmp12
tmp351 = tmp349 + tmp350
tmp352 = tmp0 - tmp351
tmp353 = triton_helpers.maximum(tmp352, tmp34)
tmp354 = tmp1 - tmp351
tmp355 = triton_helpers.maximum(tmp354, tmp34)
tmp356 = tmp353 + tmp355
tmp357 = tmp3 - tmp351
tmp358 = triton_helpers.maximum(tmp357, tmp34)
tmp359 = tmp356 + tmp358
tmp360 = tmp5 - tmp351
tmp361 = triton_helpers.maximum(tmp360, tmp34)
tmp362 = tmp359 + tmp361
tmp363 = tmp362 - tmp9
tmp364 = tmp363 * tmp57
tmp365 = tmp364 >= tmp34
tmp366 = tl.where(tmp365, tmp351, tmp349)
tmp367 = tmp350 * tmp12
tmp368 = tmp366 + tmp367
tmp369 = tmp0 - tmp368
tmp370 = triton_helpers.maximum(tmp369, tmp34)
tmp371 = tmp1 - tmp368
tmp372 = triton_helpers.maximum(tmp371, tmp34)
tmp373 = tmp370 + tmp372
tmp374 = tmp3 - tmp368
tmp375 = triton_helpers.maximum(tmp374, tmp34)
tmp376 = tmp373 + tmp375
tmp377 = tmp5 - tmp368
tmp378 = triton_helpers.maximum(tmp377, tmp34)
tmp379 = tmp376 + tmp378
tmp380 = tmp379 - tmp9
tmp381 = tmp380 * tmp57
tmp382 = tmp381 >= tmp34
tmp383 = tl.where(tmp382, tmp368, tmp366)
tmp384 = tmp367 * tmp12
tmp385 = tmp383 + tmp384
tmp386 = tmp0 - tmp385
tmp387 = triton_helpers.maximum(tmp386, tmp34)
tmp388 = tmp1 - tmp385
tmp389 = triton_helpers.maximum(tmp388, tmp34)
tmp390 = tmp387 + tmp389
tmp391 = tmp3 - tmp385
tmp392 = triton_helpers.maximum(tmp391, tmp34)
tmp393 = tmp390 + tmp392
tmp394 = tmp5 - tmp385
tmp395 = triton_helpers.maximum(tmp394, tmp34)
tmp396 = tmp393 + tmp395
tmp397 = tmp396 - tmp9
tmp398 = tmp397 * tmp57
tmp399 = tmp398 >= tmp34
tmp400 = tl.where(tmp399, tmp385, tmp383)
tmp401 = tmp384 * tmp12
tmp402 = tmp400 + tmp401
tmp403 = tmp0 - tmp402
tmp404 = triton_helpers.maximum(tmp403, tmp34)
tmp405 = tmp1 - tmp402
tmp406 = triton_helpers.maximum(tmp405, tmp34)
tmp407 = tmp404 + tmp406
tmp408 = tmp3 - tmp402
tmp409 = triton_helpers.maximum(tmp408, tmp34)
tmp410 = tmp407 + tmp409
tmp411 = tmp5 - tmp402
tmp412 = triton_helpers.maximum(tmp411, tmp34)
tmp413 = tmp410 + tmp412
tmp414 = tmp413 - tmp9
tmp415 = tmp414 * tmp57
tmp416 = tmp415 >= tmp34
tmp417 = tl.where(tmp416, tmp402, tmp400)
tmp418 = tmp401 * tmp12
tmp419 = tmp417 + tmp418
tmp420 = tmp0 - tmp419
tmp421 = triton_helpers.maximum(tmp420, tmp34)
tmp422 = tmp1 - tmp419
tmp423 = triton_helpers.maximum(tmp422, tmp34)
tmp424 = tmp421 + tmp423
tmp425 = tmp3 - tmp419
tmp426 = triton_helpers.maximum(tmp425, tmp34)
tmp427 = tmp424 + tmp426
tmp428 = tmp5 - tmp419
tmp429 = triton_helpers.maximum(tmp428, tmp34)
tmp430 = tmp427 + tmp429
tmp431 = tmp430 - tmp9
tmp432 = tmp431 * tmp57
tmp433 = tmp432 >= tmp34
tmp434 = tl.where(tmp433, tmp419, tmp417)
tl.store(out_ptr0 + (x0), tmp30, xmask)
tl.store(in_out_ptr16 + (x0), tmp434, xmask)
''', device_str='cuda')
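# [Editorial note] The kernels in this file unroll a fixed-step bisection that
# solves sum(clamp(x - tau, 0)) == 1 for the threshold tau, i.e. a
# sparsemax-style projection of each row onto the probability simplex. A
# minimal eager-mode sketch of the same recurrence follows for reference; the
# helper name `_reference_tau_bisection` and the iteration count are editorial
# assumptions, and nothing in the generated code calls it.
def _reference_tau_bisection(x, n_iter=50):
    d = x.shape[-1]
    # Bracket the root: f(max - 1) >= 0 and f(max - 1/d) <= 0.
    tau_hi = x.max(dim=-1, keepdim=True).values - 1.0 / d
    tau_lo = x.max(dim=-1, keepdim=True).values - 1.0
    dm = tau_hi - tau_lo
    f_lo = torch.clamp(x - tau_lo, min=0).sum(dim=-1, keepdim=True) - 1.0
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        f_m = torch.clamp(x - tau_m, min=0).sum(dim=-1, keepdim=True) - 1.0
        # Same-sign test mirrors the fused mul/ge/where nodes in the kernels.
        tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    return torch.clamp(x - tau_lo, min=0)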
# kernel path: runs/run_shard_0/inductor_cache/yz/cyzkhbz7eylqmmsct2eisz274ptkf4xwckrryhdzr5c5xpld426a.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_87, p_m_40], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# p_m_40 => clamp_min_41
# sub_87 => sub_87
# tau_m_40 => add_40
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %sub_87 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_40), kwargs = {})
# %clamp_min_41 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_87, 0), kwargs = {})
triton_poi_fused_add_clamp_div_sub_2 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp1 + tmp26
tmp28 = tmp0 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
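# [Editorial note] The kernel above rebuilds dm_41 on the fly instead of
# loading it: the stored dm_18 (in_ptr2) is halved 23 times (tmp4..tmp26,
# i.e. dm_41 = dm_18 / 2**23), and the probe p_m_40 = clamp(x - (tau + dm_41), 0)
# is written out elementwise. Recomputing the scalar chain avoids
# materializing each intermediate dm tensor.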
# kernel path: runs/run_shard_0/inductor_cache/sr/csrhhpesnpea33gn4n5cx2now3swxe3jsyebpywtfcwh4ny67d7d.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sum_42, f_m_40, mul_42, tau_lo_41], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# f_lo => sub_5
# f_m_40 => sub_88
# max_1 => max_1
# mul_42 => mul_42
# sub_4 => sub_4
# sum_1 => sum_1
# sum_42 => sum_42
# tau_lo => sub_2
# tau_lo_41 => where_40
# tau_m_40 => add_40
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_4, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_5 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_41, [-1]), kwargs = {})
# %sub_88 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_42, 1), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_88, %sub_5), kwargs = {})
# %where_40 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_40, %add_40, %where_39), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (x0), xmask)
tmp33 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp32 + tmp57
tmp59 = tl.where(tmp31, tmp58, tmp32)
tl.store(in_out_ptr0 + (x0), tmp59, xmask)
''', device_str='cuda')
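# [Editorial note] This kernel fuses one acceptance test: it recomputes f_lo
# from the original bracketing (max over in_ptr1), reduces the probe p_m_40
# (in_ptr0) to f_m_40 = sum - 1, rebuilds dm_41 by halving dm_18 (in_ptr2),
# and updates tau in place on in_out_ptr0 via
# where(f_m_40 * f_lo >= 0, tau + dm_41, tau).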
# kernel path: runs/run_shard_0/inductor_cache/ri/cri5g5cwgob26dhdepr7at5q3uqt7g5zn5i44mttvzuefjefarlt.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_89, p_m_41], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# p_m_41 => clamp_min_42
# sub_89 => sub_89
# tau_m_41 => add_41
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
# %sub_89 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_41), kwargs = {})
# %clamp_min_42 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_89, 0), kwargs = {})
triton_poi_fused_add_clamp_div_sub_4 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp1 + tmp27
tmp29 = tmp0 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
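# [Editorial note] Same probe pattern as triton_poi_fused_add_clamp_div_sub_2,
# one level deeper: 24 halvings (tmp4..tmp27) give dm_42 = dm_18 / 2**24 and
# the kernel emits p_m_41 = clamp(x - (tau_lo_41 + dm_42), 0).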
# kernel path: runs/run_shard_0/inductor_cache/6v/c6v66pcoz5wwqqj3qdoavhcalhw4ggic7f5bbgnf4wulrawivqmo.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sum_43, f_m_41, mul_43, tau_lo_42], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# f_lo => sub_5
# f_m_41 => sub_90
# max_1 => max_1
# mul_43 => mul_43
# sub_4 => sub_4
# sum_1 => sum_1
# sum_43 => sum_43
# tau_lo => sub_2
# tau_lo_42 => where_41
# tau_m_41 => add_41
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_4, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_5 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_42, [-1]), kwargs = {})
# %sub_90 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_43, 1), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_90, %sub_5), kwargs = {})
# %where_41 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_41, %add_41, %where_40), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (x0), xmask)
tmp33 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp57 * tmp34
tmp59 = tmp32 + tmp58
tmp60 = tl.where(tmp31, tmp59, tmp32)
tl.store(in_out_ptr0 + (x0), tmp60, xmask)
''', device_str='cuda')
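# [Editorial note] Acceptance test for the next unrolled step, mirroring
# triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3 with one extra
# halving (tmp35..tmp58 = dm_42): tau_lo_42 = where(f_m_41 * f_lo >= 0,
# tau_lo_41 + dm_42, tau_lo_41), updated in place on in_out_ptr0.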
# kernel path: runs/run_shard_0/inductor_cache/lg/clgmpssl52or2lky2wemwq23igzgtq7po6uel2vq4hq3f6g72lml.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_91], Original ATen: [aten.div, aten.add, aten.sub]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# sub_91 => sub_91
# tau_m_42 => add_42
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
# %sub_91 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_42), kwargs = {})
triton_poi_fused_add_div_sub_6 = async_compile.triton('triton_poi_fused_add_div_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp1 + tmp28
tmp30 = tmp0 - tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
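# Note: kernels of this 256-element shape broadcast the updated per-row tau
# over the last dimension. The 25 successive `* 0.5` steps re-derive dm_43
# from the buffered dm (in_ptr2), and the store writes
# sub_91 = x - (tau_lo + dm_43), the shifted scores that the following
# 64-element kernel clamps, sums, and turns into the next tau update.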
# kernel path: runs/run_shard_0/inductor_cache/jl/cjlkx2otyy2fduimnbn7ccxqmgq2wq4zlcyndzdk767y2f6xq7z2.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, p_m_42, sum_44, f_m_42, mul_44, tau_lo_43], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# f_lo => sub_5
# f_m_42 => sub_92
# max_1 => max_1
# mul_44 => mul_44
# p_m_42 => clamp_min_43
# sub_4 => sub_4
# sum_1 => sum_1
# sum_44 => sum_44
# tau_lo => sub_2
# tau_lo_43 => where_42
# tau_m_42 => add_42
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_4, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_5 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
# %clamp_min_43 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_91, 0), kwargs = {})
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_43, [-1]), kwargs = {})
# %sub_92 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_44, 1), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_92, %sub_5), kwargs = {})
# %where_42 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_42, %add_42, %where_41), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + (x0), xmask)
tmp37 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp36 + tmp63
tmp65 = tl.where(tmp35, tmp64, tmp36)
tl.store(in_out_ptr0 + (x0), tmp65, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/45/c45ntljmhht4tqet2q6qjdfrins55gu6vaasohzaiqpfex6sk7ob.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_93], Original ATen: [aten.div, aten.add, aten.sub]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# dm_44 => div_43
# sub_93 => sub_93
# tau_m_43 => add_43
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
# %sub_93 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_43), kwargs = {})
triton_poi_fused_add_div_sub_8 = async_compile.triton('triton_poi_fused_add_div_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp28 * tmp3
tmp30 = tmp1 + tmp29
tmp31 = tmp0 - tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lz/clzcekwx3vzl5uustsl63asl5aemxd4bfzovv6jgg7f5yz6ljzjf.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, p_m_43, sum_45, f_m_43, mul_45, tau_lo_44, dm_45, tau_m_44, sub_95, p_m_44, sum_46, f_m_44, mul_46, tau_lo_45, dm_46, tau_m_45, sub_97, p_m_45, sum_47, f_m_45, mul_47, tau_lo_46, dm_47, tau_m_46, sub_99, p_m_46, sum_48, f_m_46, mul_48, tau_lo_47, dm_48, tau_m_47, sub_101, p_m_47, sum_49, f_m_47, mul_49, tau_lo_48, dm_49, tau_m_48, sub_103, p_m_48, sum_50, f_m_48, tau_lo_49, dm_50, tau_m_49, sub_105, p_m_49, sum_52], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# dm_44 => div_43
# dm_45 => div_44
# dm_46 => div_45
# dm_47 => div_46
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# f_lo => sub_5
# f_m_43 => sub_94
# f_m_44 => sub_96
# f_m_45 => sub_98
# f_m_46 => sub_100
# f_m_47 => sub_102
# f_m_48 => sub_104
# max_1 => max_1
# mul_45 => mul_45
# mul_46 => mul_46
# mul_47 => mul_47
# mul_48 => mul_48
# mul_49 => mul_49
# p_m_43 => clamp_min_44
# p_m_44 => clamp_min_45
# p_m_45 => clamp_min_46
# p_m_46 => clamp_min_47
# p_m_47 => clamp_min_48
# p_m_48 => clamp_min_49
# p_m_49 => clamp_min_50
# sub_101 => sub_101
# sub_103 => sub_103
# sub_105 => sub_105
# sub_4 => sub_4
# sub_95 => sub_95
# sub_97 => sub_97
# sub_99 => sub_99
# sum_1 => sum_1
# sum_45 => sum_45
# sum_46 => sum_46
# sum_47 => sum_47
# sum_48 => sum_48
# sum_49 => sum_49
# sum_50 => sum_50
# sum_52 => sum_52
# tau_lo => sub_2
# tau_lo_44 => where_43
# tau_lo_45 => where_44
# tau_lo_46 => where_45
# tau_lo_47 => where_46
# tau_lo_48 => where_47
# tau_lo_49 => where_48
# tau_m_43 => add_43
# tau_m_44 => add_44
# tau_m_45 => add_45
# tau_m_46 => add_46
# tau_m_47 => add_47
# tau_m_48 => add_48
# tau_m_49 => add_49
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_4, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_5 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
# %clamp_min_44 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_93, 0), kwargs = {})
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_44, [-1]), kwargs = {})
# %sub_94 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_45, 1), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_94, %sub_5), kwargs = {})
# %where_43 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_43, %add_43, %where_42), kwargs = {})
# %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {})
# %add_44 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_43, %div_44), kwargs = {})
# %sub_95 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_44), kwargs = {})
# %clamp_min_45 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_95, 0), kwargs = {})
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_45, [-1]), kwargs = {})
# %sub_96 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_46, 1), kwargs = {})
# %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_96, %sub_5), kwargs = {})
# %where_44 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_44, %add_44, %where_43), kwargs = {})
# %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {})
# %add_45 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_44, %div_45), kwargs = {})
# %sub_97 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_45), kwargs = {})
# %clamp_min_46 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_97, 0), kwargs = {})
# %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_46, [-1]), kwargs = {})
# %sub_98 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_47, 1), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_98, %sub_5), kwargs = {})
# %where_45 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_45, %add_45, %where_44), kwargs = {})
# %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {})
# %add_46 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_45, %div_46), kwargs = {})
# %sub_99 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_46), kwargs = {})
# %clamp_min_47 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_99, 0), kwargs = {})
# %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_47, [-1]), kwargs = {})
# %sub_100 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_48, 1), kwargs = {})
# %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_100, %sub_5), kwargs = {})
# %where_46 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_46, %add_46, %where_45), kwargs = {})
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {})
# %sub_101 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_47), kwargs = {})
# %clamp_min_48 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_101, 0), kwargs = {})
# %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_48, [-1]), kwargs = {})
# %sub_102 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_49, 1), kwargs = {})
# %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_102, %sub_5), kwargs = {})
# %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {})
# %sub_103 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_48), kwargs = {})
# %clamp_min_49 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_103, 0), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_49, [-1]), kwargs = {})
# %sub_104 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_50, 1), kwargs = {})
# %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_105 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_105, 0), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_50, [-1]), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + (x0), xmask)
tmp37 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp63 * tmp38
tmp65 = tmp36 + tmp64
tmp66 = tl.where(tmp35, tmp65, tmp36)
tmp67 = tmp64 * tmp38
tmp68 = tmp66 + tmp67
tmp69 = tmp14 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = tmp15 - tmp68
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = tmp70 + tmp72
tmp74 = tmp17 - tmp68
tmp75 = triton_helpers.maximum(tmp74, tmp1)
tmp76 = tmp73 + tmp75
tmp77 = tmp19 - tmp68
tmp78 = triton_helpers.maximum(tmp77, tmp1)
tmp79 = tmp76 + tmp78
tmp80 = tmp79 - tmp12
tmp81 = tmp80 * tmp33
tmp82 = tmp81 >= tmp1
tmp83 = tl.where(tmp82, tmp68, tmp66)
tmp84 = tmp67 * tmp38
tmp85 = tmp83 + tmp84
tmp86 = tmp14 - tmp85
tmp87 = triton_helpers.maximum(tmp86, tmp1)
tmp88 = tmp15 - tmp85
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = tmp87 + tmp89
tmp91 = tmp17 - tmp85
tmp92 = triton_helpers.maximum(tmp91, tmp1)
tmp93 = tmp90 + tmp92
tmp94 = tmp19 - tmp85
tmp95 = triton_helpers.maximum(tmp94, tmp1)
tmp96 = tmp93 + tmp95
tmp97 = tmp96 - tmp12
tmp98 = tmp97 * tmp33
tmp99 = tmp98 >= tmp1
tmp100 = tl.where(tmp99, tmp85, tmp83)
tmp101 = tmp84 * tmp38
tmp102 = tmp100 + tmp101
tmp103 = tmp14 - tmp102
tmp104 = triton_helpers.maximum(tmp103, tmp1)
tmp105 = tmp15 - tmp102
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = tmp104 + tmp106
tmp108 = tmp17 - tmp102
tmp109 = triton_helpers.maximum(tmp108, tmp1)
tmp110 = tmp107 + tmp109
tmp111 = tmp19 - tmp102
tmp112 = triton_helpers.maximum(tmp111, tmp1)
tmp113 = tmp110 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp33
tmp116 = tmp115 >= tmp1
tmp117 = tl.where(tmp116, tmp102, tmp100)
tmp118 = tmp101 * tmp38
tmp119 = tmp117 + tmp118
tmp120 = tmp14 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp1)
tmp122 = tmp15 - tmp119
tmp123 = triton_helpers.maximum(tmp122, tmp1)
tmp124 = tmp121 + tmp123
tmp125 = tmp17 - tmp119
tmp126 = triton_helpers.maximum(tmp125, tmp1)
tmp127 = tmp124 + tmp126
tmp128 = tmp19 - tmp119
tmp129 = triton_helpers.maximum(tmp128, tmp1)
tmp130 = tmp127 + tmp129
tmp131 = tmp130 - tmp12
tmp132 = tmp131 * tmp33
tmp133 = tmp132 >= tmp1
tmp134 = tl.where(tmp133, tmp119, tmp117)
tmp135 = tmp118 * tmp38
tmp136 = tmp134 + tmp135
tmp137 = tmp14 - tmp136
tmp138 = triton_helpers.maximum(tmp137, tmp1)
tmp139 = tmp15 - tmp136
tmp140 = triton_helpers.maximum(tmp139, tmp1)
tmp141 = tmp138 + tmp140
tmp142 = tmp17 - tmp136
tmp143 = triton_helpers.maximum(tmp142, tmp1)
tmp144 = tmp141 + tmp143
tmp145 = tmp19 - tmp136
tmp146 = triton_helpers.maximum(tmp145, tmp1)
tmp147 = tmp144 + tmp146
tmp148 = tmp147 - tmp12
tmp149 = tmp148 * tmp33
tmp150 = tmp149 >= tmp1
tmp151 = tl.where(tmp150, tmp136, tmp134)
tmp152 = tmp135 * tmp38
tmp153 = tmp151 + tmp152
tmp154 = tmp14 - tmp153
tmp155 = triton_helpers.maximum(tmp154, tmp1)
tmp156 = tmp15 - tmp153
tmp157 = triton_helpers.maximum(tmp156, tmp1)
tmp158 = tmp155 + tmp157
tmp159 = tmp17 - tmp153
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = tmp158 + tmp160
tmp162 = tmp19 - tmp153
tmp163 = triton_helpers.maximum(tmp162, tmp1)
tmp164 = tmp161 + tmp163
tl.store(out_ptr4 + (x0), tmp101, xmask)
tl.store(in_out_ptr2 + (x0), tmp151, xmask)
tl.store(out_ptr7 + (x0), tmp164, xmask)
''', device_str='cuda')
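# Note: this kernel fuses the last several bisection iterations in registers:
# it repeats the halve/evaluate/where pattern for tau_m_43 through tau_m_49
# and stores only what the final kernel needs -- an intermediate dm
# (out_ptr4), the last accepted tau_lo (in_out_ptr2), and the unnormalized
# partition sum sum_52 (out_ptr7).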
# kernel path: runs/run_shard_0/inductor_cache/qe/cqe4wxdrqmlpdjpsore2xrtnu7imed3hrgbsaycsyw5yzkt5vtf2.py
# Topologically Sorted Source Nodes: [dm_48, dm_49, dm_50, tau_m_49, sub_105, p_m_49, p_m_50], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# p_m_49 => clamp_min_50
# p_m_50 => div_50
# sub_105 => sub_105
# tau_m_49 => add_49
# Graph fragment:
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_105 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_105, 0), kwargs = {})
# %div_50 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_min_50, %unsqueeze_50), kwargs = {})
triton_poi_fused_add_clamp_div_sub_10 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp1 + tmp6
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp12 = tmp10 / tmp11
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
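# Note: this last kernel materializes the `ensure_sum_one` step of the source
# module: p = clamp(x - tau_m_49, 0) / sum_52, so each row of the output sums
# to exactly 1 along the last dimension.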
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf40 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, tau_m, sub_7, p_m, sum_2, f_m, sub_4, clamp, sum_1, f_lo, mul_2, tau_lo_1, dm_2, tau_m_1, sub_9, p_m_1, sum_3, f_m_1, mul_3, tau_lo_2, dm_3, tau_m_2, sub_11, p_m_2, sum_4, f_m_2, mul_4, tau_lo_3, dm_4, tau_m_3, sub_13, p_m_3, sum_5, f_m_3, mul_5, tau_lo_4, dm_5, tau_m_4, sub_15, p_m_4, sum_6, f_m_4, mul_6, tau_lo_5, dm_6, tau_m_5, sub_17, p_m_5, sum_7, f_m_5, mul_7, tau_lo_6, dm_7, tau_m_6, sub_19, p_m_6, sum_8, f_m_6, mul_8, tau_lo_7, dm_8, tau_m_7, sub_21, p_m_7, sum_9, f_m_7, mul_9, tau_lo_8, dm_9, tau_m_8, sub_23, p_m_8, sum_10, f_m_8, mul_10, tau_lo_9, dm_10, tau_m_9, sub_25, p_m_9, sum_11, f_m_9, mul_11, tau_lo_10, dm_11, tau_m_10, sub_27, p_m_10, sum_12, f_m_10, mul_12, tau_lo_11, dm_12, tau_m_11, sub_29, p_m_11, sum_13, f_m_11, mul_13, tau_lo_12, dm_13, tau_m_12, sub_31, p_m_12, sum_14, f_m_12, mul_14, tau_lo_13, dm_14, tau_m_13, sub_33, p_m_13, sum_15, f_m_13, mul_15, tau_lo_14, dm_15, tau_m_14, sub_35, p_m_14, sum_16, f_m_14, mul_16, tau_lo_15, dm_16, tau_m_15, sub_37, p_m_15, sum_17, f_m_15, mul_17, tau_lo_16, dm_17, tau_m_16, sub_39, p_m_16, sum_18, f_m_16, mul_18, tau_lo_17], Original ATen: [aten.max, aten.sub, aten.div, aten.add, aten.clamp, aten.sum, aten.mul, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0.run(buf41, arg0_1, 64, grid=grid(64), stream=stream0)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf82 = reinterpret_tensor(buf81, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf81 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, sub_4, clamp, sum_1, f_lo, dm_2, dm_3, dm_4, dm_5, dm_6, dm_7, dm_8, dm_9, dm_10, dm_11, dm_12, dm_13, dm_14, dm_15, dm_16, dm_17, dm_18, tau_m_17, sub_41, p_m_17, sum_19, f_m_17, mul_19, tau_lo_18, dm_19, tau_m_18, sub_43, p_m_18, sum_20, f_m_18, mul_20, tau_lo_19, dm_20, tau_m_19, sub_45, p_m_19, sum_21, f_m_19, tau_lo_20, dm_21, tau_m_20, sub_47, p_m_20, sum_22, f_m_20, mul_22, tau_lo_21, dm_22, tau_m_21, sub_49, p_m_21, sum_23, f_m_21, mul_23, tau_lo_22, dm_23, tau_m_22, sub_51, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_53, p_m_23, sum_25, f_m_23, mul_25, tau_lo_24, dm_25, tau_m_24, sub_55, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_57, p_m_25, sum_27, f_m_25, mul_27, tau_lo_26, dm_27, tau_m_26, sub_59, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_61, p_m_27, sum_29, f_m_27, tau_lo_28, dm_29, tau_m_28, sub_63, p_m_28, sum_30, tau_lo_29, dm_30, tau_m_29, sub_65, p_m_29, sum_31, tau_lo_30, dm_31, tau_m_30, sub_67, p_m_30, sum_32, tau_lo_31, dm_32, tau_m_31, sub_69, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_71, p_m_32, sum_34, tau_lo_33, dm_34, tau_m_33, sub_73, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_75, p_m_34, sum_36, tau_lo_35, dm_36, tau_m_35, sub_77, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_79, p_m_36, sum_38, tau_lo_37, dm_38, tau_m_37, sub_81, p_m_37, sum_39, tau_lo_38, dm_39, tau_m_38, sub_83, p_m_38, sum_40, tau_lo_39, dm_40, tau_m_39, sub_85, p_m_39, sum_41, tau_lo_40], Original ATen: [aten.max, aten.sub, aten.div, aten.clamp, aten.sum, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1.run(buf82, arg0_1, buf41, buf42, 64, grid=grid(64), stream=stream0)
buf83 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_87, p_m_40], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_2.run(arg0_1, buf82, buf42, buf83, 256, grid=grid(256), stream=stream0)
buf85 = buf82; del buf82 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sum_42, f_m_40, mul_42, tau_lo_41], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3.run(buf85, buf83, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf86 = buf83; del buf83 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_89, p_m_41], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_4.run(arg0_1, buf85, buf42, buf86, 256, grid=grid(256), stream=stream0)
buf88 = buf85; del buf85 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sum_43, f_m_41, mul_43, tau_lo_42], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5.run(buf88, buf86, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf89 = buf86; del buf86 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_91], Original ATen: [aten.div, aten.add, aten.sub]
triton_poi_fused_add_div_sub_6.run(arg0_1, buf88, buf42, buf89, 256, grid=grid(256), stream=stream0)
buf91 = buf88; del buf88 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, p_m_42, sum_44, f_m_42, mul_44, tau_lo_43], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7.run(buf91, buf89, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf92 = buf89; del buf89 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_93], Original ATen: [aten.div, aten.add, aten.sub]
triton_poi_fused_add_div_sub_8.run(arg0_1, buf91, buf42, buf92, 256, grid=grid(256), stream=stream0)
buf94 = buf91; del buf91 # reuse
buf100 = buf41; del buf41 # reuse
buf103 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf104 = reinterpret_tensor(buf103, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf103 # reuse
buf105 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_4, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, p_m_43, sum_45, f_m_43, mul_45, tau_lo_44, dm_45, tau_m_44, sub_95, p_m_44, sum_46, f_m_44, mul_46, tau_lo_45, dm_46, tau_m_45, sub_97, p_m_45, sum_47, f_m_45, mul_47, tau_lo_46, dm_47, tau_m_46, sub_99, p_m_46, sum_48, f_m_46, mul_48, tau_lo_47, dm_48, tau_m_47, sub_101, p_m_47, sum_49, f_m_47, mul_49, tau_lo_48, dm_49, tau_m_48, sub_103, p_m_48, sum_50, f_m_48, tau_lo_49, dm_50, tau_m_49, sub_105, p_m_49, sum_52], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9.run(buf94, buf104, buf92, arg0_1, buf42, buf100, buf105, 64, grid=grid(64), stream=stream0)
del buf42
del buf94
buf106 = buf92; del buf92 # reuse
# Topologically Sorted Source Nodes: [dm_48, dm_49, dm_50, tau_m_49, sub_105, p_m_49, p_m_50], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_10.run(arg0_1, buf104, buf100, buf105, buf106, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf100
del buf104
del buf105
return (buf106, )
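# Note: the 50 bisection iterations of the source module are covered by a
# handful of fused kernels: kernel 0 folds iterations up to tau_lo_17,
# kernel 1 up to tau_lo_40, the middle kernels advance one iteration each
# while reusing buffers in place, kernel 9 fuses the rest, and kernel 10
# performs the final normalization.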
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
"""sparsemax: normalizing sparse transform (a la softmax), via bisection.
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If false, the result may
        sum to a value close to, but not exactly, 1, which can cause
        problems downstream.
Note: This function does not yet support normalizing along anything except
the last dimension. Please use transposing and views to achieve more
general behavior.
Returns
-------
    P : torch.Tensor, same shape as X
        The projection result, such that each slice of P along dim sums to 1.
"""
return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
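# Illustrative usage sketch (the helper name is hypothetical, not part of the
# original API): sparsemax returns a sparse probability distribution, so each
# row sums to 1 and low-scoring entries are driven exactly to zero.
def _demo_sparsemax_bisect():
    x = torch.tensor([[1.5, 0.2, -1.0, 0.4]])
    p = sparsemax_bisect(x)
    assert torch.allclose(p.sum(dim=-1), torch.ones(1))
    return p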
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
max_val, _ = X.max(dim=dim, keepdim=True)
X = X * (alpha - 1)
max_val = max_val * (alpha - 1)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
class SparsemaxBisectFunction(EntmaxBisectFunction):
@classmethod
def _gp(cls, x, alpha):
return x
@classmethod
def _gp_inv(cls, y, alpha):
return y
@classmethod
def _p(cls, x, alpha):
return torch.clamp(x, min=0)
@classmethod
def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
        return super().forward(ctx, X, alpha=2, dim=dim, n_iter=n_iter,
            ensure_sum_one=ensure_sum_one)
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y > 0
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None, None
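# Sanity-check sketch (hypothetical helper, not in the original module): the
# custom backward can be compared against finite differences with gradcheck,
# which requires double precision for its default tolerances.
def _gradcheck_sparsemax_bisect():
    x = torch.randn(3, 5, dtype=torch.double, requires_grad=True)
    return torch.autograd.gradcheck(sparsemax_bisect, (x,), eps=1e-6)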
class SparsemaxBisect(nn.Module):
def __init__(self, dim=-1, n_iter=None):
"""sparsemax: normalizing sparse transform (a la softmax) via bisection
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
        n_iter : int or None
            Number of bisection iterations; if None, a default of 50 is used.
            For float32, 24 iterations should suffice for machine precision.
"""
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
    def forward(self, X):
        n_iter = 50 if self.n_iter is None else self.n_iter
        return sparsemax_bisect(X, dim=self.dim, n_iter=n_iter)
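# Minimal module usage sketch (hypothetical helper): the layer is a drop-in
# replacement for a softmax over the last dimension.
def _demo_sparsemax_module():
    layer = SparsemaxBisect(dim=-1)
    logits = torch.randn(2, 3, 10)
    return layer(logits)  # rows sum to 1; low scores map to exact zeros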
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0(in_out_ptr8,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
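    # Inductor-generated: unrolls the first block of bisection iterations
    # for dim=-1 with d=4. tmp6 is the row max, tmp8/tmp10 bracket tau as
    # [max - 1, max - 1/d], and each sign test halves the bracket.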
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp9 = 0.25
tmp10 = tmp6 - tmp9
tmp11 = tmp10 - tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp8 + tmp13
tmp15 = tmp0 - tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp1 - tmp14
tmp19 = triton_helpers.maximum(tmp18, tmp16)
tmp20 = tmp17 + tmp19
tmp21 = tmp3 - tmp14
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = tmp20 + tmp22
tmp24 = tmp5 - tmp14
tmp25 = triton_helpers.maximum(tmp24, tmp16)
tmp26 = tmp23 + tmp25
tmp27 = tmp26 - tmp7
tmp28 = tmp0 - tmp8
tmp29 = triton_helpers.maximum(tmp28, tmp16)
tmp30 = tmp1 - tmp8
tmp31 = triton_helpers.maximum(tmp30, tmp16)
tmp32 = tmp29 + tmp31
tmp33 = tmp3 - tmp8
tmp34 = triton_helpers.maximum(tmp33, tmp16)
tmp35 = tmp32 + tmp34
tmp36 = tmp5 - tmp8
tmp37 = triton_helpers.maximum(tmp36, tmp16)
tmp38 = tmp35 + tmp37
tmp39 = tmp38 - tmp7
tmp40 = tmp27 * tmp39
tmp41 = tmp40 >= tmp16
tmp42 = tl.where(tmp41, tmp14, tmp8)
tmp43 = tmp13 * tmp12
tmp44 = tmp42 + tmp43
tmp45 = tmp0 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp16)
tmp47 = tmp1 - tmp44
tmp48 = triton_helpers.maximum(tmp47, tmp16)
tmp49 = tmp46 + tmp48
tmp50 = tmp3 - tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp16)
tmp52 = tmp49 + tmp51
tmp53 = tmp5 - tmp44
tmp54 = triton_helpers.maximum(tmp53, tmp16)
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp7
tmp57 = tmp56 * tmp39
tmp58 = tmp57 >= tmp16
tmp59 = tl.where(tmp58, tmp44, tmp42)
tmp60 = tmp43 * tmp12
tmp61 = tmp59 + tmp60
tmp62 = tmp0 - tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp16)
tmp64 = tmp1 - tmp61
tmp65 = triton_helpers.maximum(tmp64, tmp16)
tmp66 = tmp63 + tmp65
tmp67 = tmp3 - tmp61
tmp68 = triton_helpers.maximum(tmp67, tmp16)
tmp69 = tmp66 + tmp68
tmp70 = tmp5 - tmp61
tmp71 = triton_helpers.maximum(tmp70, tmp16)
tmp72 = tmp69 + tmp71
tmp73 = tmp72 - tmp7
tmp74 = tmp73 * tmp39
tmp75 = tmp74 >= tmp16
tmp76 = tl.where(tmp75, tmp61, tmp59)
tmp77 = tmp60 * tmp12
tmp78 = tmp76 + tmp77
tmp79 = tmp0 - tmp78
tmp80 = triton_helpers.maximum(tmp79, tmp16)
tmp81 = tmp1 - tmp78
tmp82 = triton_helpers.maximum(tmp81, tmp16)
tmp83 = tmp80 + tmp82
tmp84 = tmp3 - tmp78
tmp85 = triton_helpers.maximum(tmp84, tmp16)
tmp86 = tmp83 + tmp85
tmp87 = tmp5 - tmp78
tmp88 = triton_helpers.maximum(tmp87, tmp16)
tmp89 = tmp86 + tmp88
tmp90 = tmp89 - tmp7
tmp91 = tmp90 * tmp39
tmp92 = tmp91 >= tmp16
tmp93 = tl.where(tmp92, tmp78, tmp76)
tmp94 = tmp77 * tmp12
tmp95 = tmp93 + tmp94
tmp96 = tmp0 - tmp95
tmp97 = triton_helpers.maximum(tmp96, tmp16)
tmp98 = tmp1 - tmp95
tmp99 = triton_helpers.maximum(tmp98, tmp16)
tmp100 = tmp97 + tmp99
tmp101 = tmp3 - tmp95
tmp102 = triton_helpers.maximum(tmp101, tmp16)
tmp103 = tmp100 + tmp102
tmp104 = tmp5 - tmp95
tmp105 = triton_helpers.maximum(tmp104, tmp16)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 - tmp7
tmp108 = tmp107 * tmp39
tmp109 = tmp108 >= tmp16
tmp110 = tl.where(tmp109, tmp95, tmp93)
tmp111 = tmp94 * tmp12
tmp112 = tmp110 + tmp111
tmp113 = tmp0 - tmp112
tmp114 = triton_helpers.maximum(tmp113, tmp16)
tmp115 = tmp1 - tmp112
tmp116 = triton_helpers.maximum(tmp115, tmp16)
tmp117 = tmp114 + tmp116
tmp118 = tmp3 - tmp112
tmp119 = triton_helpers.maximum(tmp118, tmp16)
tmp120 = tmp117 + tmp119
tmp121 = tmp5 - tmp112
tmp122 = triton_helpers.maximum(tmp121, tmp16)
tmp123 = tmp120 + tmp122
tmp124 = tmp123 - tmp7
tmp125 = tmp124 * tmp39
tmp126 = tmp125 >= tmp16
tmp127 = tl.where(tmp126, tmp112, tmp110)
tmp128 = tmp111 * tmp12
tmp129 = tmp127 + tmp128
tmp130 = tmp0 - tmp129
tmp131 = triton_helpers.maximum(tmp130, tmp16)
tmp132 = tmp1 - tmp129
tmp133 = triton_helpers.maximum(tmp132, tmp16)
tmp134 = tmp131 + tmp133
tmp135 = tmp3 - tmp129
tmp136 = triton_helpers.maximum(tmp135, tmp16)
tmp137 = tmp134 + tmp136
tmp138 = tmp5 - tmp129
tmp139 = triton_helpers.maximum(tmp138, tmp16)
tmp140 = tmp137 + tmp139
tmp141 = tmp140 - tmp7
tmp142 = tmp141 * tmp39
tmp143 = tmp142 >= tmp16
tmp144 = tl.where(tmp143, tmp129, tmp127)
tmp145 = tmp128 * tmp12
tmp146 = tmp144 + tmp145
tmp147 = tmp0 - tmp146
tmp148 = triton_helpers.maximum(tmp147, tmp16)
tmp149 = tmp1 - tmp146
tmp150 = triton_helpers.maximum(tmp149, tmp16)
tmp151 = tmp148 + tmp150
tmp152 = tmp3 - tmp146
tmp153 = triton_helpers.maximum(tmp152, tmp16)
tmp154 = tmp151 + tmp153
tmp155 = tmp5 - tmp146
tmp156 = triton_helpers.maximum(tmp155, tmp16)
tmp157 = tmp154 + tmp156
tmp158 = tmp157 - tmp7
tmp159 = tmp158 * tmp39
tmp160 = tmp159 >= tmp16
tmp161 = tl.where(tmp160, tmp146, tmp144)
tmp162 = tmp145 * tmp12
tmp163 = tmp161 + tmp162
tmp164 = tmp0 - tmp163
tmp165 = triton_helpers.maximum(tmp164, tmp16)
tmp166 = tmp1 - tmp163
tmp167 = triton_helpers.maximum(tmp166, tmp16)
tmp168 = tmp165 + tmp167
tmp169 = tmp3 - tmp163
tmp170 = triton_helpers.maximum(tmp169, tmp16)
tmp171 = tmp168 + tmp170
tmp172 = tmp5 - tmp163
tmp173 = triton_helpers.maximum(tmp172, tmp16)
tmp174 = tmp171 + tmp173
tmp175 = tmp174 - tmp7
tmp176 = tmp175 * tmp39
tmp177 = tmp176 >= tmp16
tmp178 = tl.where(tmp177, tmp163, tmp161)
tmp179 = tmp162 * tmp12
tmp180 = tmp178 + tmp179
tmp181 = tmp0 - tmp180
tmp182 = triton_helpers.maximum(tmp181, tmp16)
tmp183 = tmp1 - tmp180
tmp184 = triton_helpers.maximum(tmp183, tmp16)
tmp185 = tmp182 + tmp184
tmp186 = tmp3 - tmp180
tmp187 = triton_helpers.maximum(tmp186, tmp16)
tmp188 = tmp185 + tmp187
tmp189 = tmp5 - tmp180
tmp190 = triton_helpers.maximum(tmp189, tmp16)
tmp191 = tmp188 + tmp190
tmp192 = tmp191 - tmp7
tmp193 = tmp192 * tmp39
tmp194 = tmp193 >= tmp16
tmp195 = tl.where(tmp194, tmp180, tmp178)
tmp196 = tmp179 * tmp12
tmp197 = tmp195 + tmp196
tmp198 = tmp0 - tmp197
tmp199 = triton_helpers.maximum(tmp198, tmp16)
tmp200 = tmp1 - tmp197
tmp201 = triton_helpers.maximum(tmp200, tmp16)
tmp202 = tmp199 + tmp201
tmp203 = tmp3 - tmp197
tmp204 = triton_helpers.maximum(tmp203, tmp16)
tmp205 = tmp202 + tmp204
tmp206 = tmp5 - tmp197
tmp207 = triton_helpers.maximum(tmp206, tmp16)
tmp208 = tmp205 + tmp207
tmp209 = tmp208 - tmp7
tmp210 = tmp209 * tmp39
tmp211 = tmp210 >= tmp16
tmp212 = tl.where(tmp211, tmp197, tmp195)
tmp213 = tmp196 * tmp12
tmp214 = tmp212 + tmp213
tmp215 = tmp0 - tmp214
tmp216 = triton_helpers.maximum(tmp215, tmp16)
tmp217 = tmp1 - tmp214
tmp218 = triton_helpers.maximum(tmp217, tmp16)
tmp219 = tmp216 + tmp218
tmp220 = tmp3 - tmp214
tmp221 = triton_helpers.maximum(tmp220, tmp16)
tmp222 = tmp219 + tmp221
tmp223 = tmp5 - tmp214
tmp224 = triton_helpers.maximum(tmp223, tmp16)
tmp225 = tmp222 + tmp224
tmp226 = tmp225 - tmp7
tmp227 = tmp226 * tmp39
tmp228 = tmp227 >= tmp16
tmp229 = tl.where(tmp228, tmp214, tmp212)
tmp230 = tmp213 * tmp12
tmp231 = tmp229 + tmp230
tmp232 = tmp0 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp16)
tmp234 = tmp1 - tmp231
tmp235 = triton_helpers.maximum(tmp234, tmp16)
tmp236 = tmp233 + tmp235
tmp237 = tmp3 - tmp231
tmp238 = triton_helpers.maximum(tmp237, tmp16)
tmp239 = tmp236 + tmp238
tmp240 = tmp5 - tmp231
tmp241 = triton_helpers.maximum(tmp240, tmp16)
tmp242 = tmp239 + tmp241
tmp243 = tmp242 - tmp7
tmp244 = tmp243 * tmp39
tmp245 = tmp244 >= tmp16
tmp246 = tl.where(tmp245, tmp231, tmp229)
tmp247 = tmp230 * tmp12
tmp248 = tmp246 + tmp247
tmp249 = tmp0 - tmp248
tmp250 = triton_helpers.maximum(tmp249, tmp16)
tmp251 = tmp1 - tmp248
tmp252 = triton_helpers.maximum(tmp251, tmp16)
tmp253 = tmp250 + tmp252
tmp254 = tmp3 - tmp248
tmp255 = triton_helpers.maximum(tmp254, tmp16)
tmp256 = tmp253 + tmp255
tmp257 = tmp5 - tmp248
tmp258 = triton_helpers.maximum(tmp257, tmp16)
tmp259 = tmp256 + tmp258
tmp260 = tmp259 - tmp7
tmp261 = tmp260 * tmp39
tmp262 = tmp261 >= tmp16
tmp263 = tl.where(tmp262, tmp248, tmp246)
tmp264 = tmp247 * tmp12
tmp265 = tmp263 + tmp264
tmp266 = tmp0 - tmp265
tmp267 = triton_helpers.maximum(tmp266, tmp16)
tmp268 = tmp1 - tmp265
tmp269 = triton_helpers.maximum(tmp268, tmp16)
tmp270 = tmp267 + tmp269
tmp271 = tmp3 - tmp265
tmp272 = triton_helpers.maximum(tmp271, tmp16)
tmp273 = tmp270 + tmp272
tmp274 = tmp5 - tmp265
tmp275 = triton_helpers.maximum(tmp274, tmp16)
tmp276 = tmp273 + tmp275
tmp277 = tmp276 - tmp7
tmp278 = tmp277 * tmp39
tmp279 = tmp278 >= tmp16
tmp280 = tl.where(tmp279, tmp265, tmp263)
tmp281 = tmp264 * tmp12
tmp282 = tmp280 + tmp281
tmp283 = tmp0 - tmp282
tmp284 = triton_helpers.maximum(tmp283, tmp16)
tmp285 = tmp1 - tmp282
tmp286 = triton_helpers.maximum(tmp285, tmp16)
tmp287 = tmp284 + tmp286
tmp288 = tmp3 - tmp282
tmp289 = triton_helpers.maximum(tmp288, tmp16)
tmp290 = tmp287 + tmp289
tmp291 = tmp5 - tmp282
tmp292 = triton_helpers.maximum(tmp291, tmp16)
tmp293 = tmp290 + tmp292
tmp294 = tmp293 - tmp7
tmp295 = tmp294 * tmp39
tmp296 = tmp295 >= tmp16
tmp297 = tl.where(tmp296, tmp282, tmp280)
tmp298 = tmp281 * tmp12
tmp299 = tmp297 + tmp298
tmp300 = tmp0 - tmp299
tmp301 = triton_helpers.maximum(tmp300, tmp16)
tmp302 = tmp1 - tmp299
tmp303 = triton_helpers.maximum(tmp302, tmp16)
tmp304 = tmp301 + tmp303
tmp305 = tmp3 - tmp299
tmp306 = triton_helpers.maximum(tmp305, tmp16)
tmp307 = tmp304 + tmp306
tmp308 = tmp5 - tmp299
tmp309 = triton_helpers.maximum(tmp308, tmp16)
tmp310 = tmp307 + tmp309
tmp311 = tmp310 - tmp7
tmp312 = tmp311 * tmp39
tmp313 = tmp312 >= tmp16
tmp314 = tl.where(tmp313, tmp299, tmp297)
tl.store(in_out_ptr8 + x0, tmp314, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1(in_out_ptr16,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
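    # Continues the bisection from the tau produced by the previous kernel
    # (in_ptr1) and also stores the current halved bracket width to out_ptr0
    # for reuse by the later kernels.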
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + x0, xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.25
tmp8 = tmp6 - tmp7
tmp9 = 1.0
tmp10 = tmp6 - tmp9
tmp11 = tmp8 - tmp10
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp12
tmp15 = tmp14 * tmp12
tmp16 = tmp15 * tmp12
tmp17 = tmp16 * tmp12
tmp18 = tmp17 * tmp12
tmp19 = tmp18 * tmp12
tmp20 = tmp19 * tmp12
tmp21 = tmp20 * tmp12
tmp22 = tmp21 * tmp12
tmp23 = tmp22 * tmp12
tmp24 = tmp23 * tmp12
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp12
tmp27 = tmp26 * tmp12
tmp28 = tmp27 * tmp12
tmp29 = tmp28 * tmp12
tmp30 = tmp29 * tmp12
tmp32 = tmp31 + tmp30
tmp33 = tmp0 - tmp32
tmp34 = 0.0
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp1 - tmp32
tmp37 = triton_helpers.maximum(tmp36, tmp34)
tmp38 = tmp35 + tmp37
tmp39 = tmp3 - tmp32
tmp40 = triton_helpers.maximum(tmp39, tmp34)
tmp41 = tmp38 + tmp40
tmp42 = tmp5 - tmp32
tmp43 = triton_helpers.maximum(tmp42, tmp34)
tmp44 = tmp41 + tmp43
tmp45 = tmp44 - tmp9
tmp46 = tmp0 - tmp10
tmp47 = triton_helpers.maximum(tmp46, tmp34)
tmp48 = tmp1 - tmp10
tmp49 = triton_helpers.maximum(tmp48, tmp34)
tmp50 = tmp47 + tmp49
tmp51 = tmp3 - tmp10
tmp52 = triton_helpers.maximum(tmp51, tmp34)
tmp53 = tmp50 + tmp52
tmp54 = tmp5 - tmp10
tmp55 = triton_helpers.maximum(tmp54, tmp34)
tmp56 = tmp53 + tmp55
tmp57 = tmp56 - tmp9
tmp58 = tmp45 * tmp57
tmp59 = tmp58 >= tmp34
tmp60 = tl.where(tmp59, tmp32, tmp31)
tmp61 = tmp30 * tmp12
tmp62 = tmp60 + tmp61
tmp63 = tmp0 - tmp62
tmp64 = triton_helpers.maximum(tmp63, tmp34)
tmp65 = tmp1 - tmp62
tmp66 = triton_helpers.maximum(tmp65, tmp34)
tmp67 = tmp64 + tmp66
tmp68 = tmp3 - tmp62
tmp69 = triton_helpers.maximum(tmp68, tmp34)
tmp70 = tmp67 + tmp69
tmp71 = tmp5 - tmp62
tmp72 = triton_helpers.maximum(tmp71, tmp34)
tmp73 = tmp70 + tmp72
tmp74 = tmp73 - tmp9
tmp75 = tmp74 * tmp57
tmp76 = tmp75 >= tmp34
tmp77 = tl.where(tmp76, tmp62, tmp60)
tmp78 = tmp61 * tmp12
tmp79 = tmp77 + tmp78
tmp80 = tmp0 - tmp79
tmp81 = triton_helpers.maximum(tmp80, tmp34)
tmp82 = tmp1 - tmp79
tmp83 = triton_helpers.maximum(tmp82, tmp34)
tmp84 = tmp81 + tmp83
tmp85 = tmp3 - tmp79
tmp86 = triton_helpers.maximum(tmp85, tmp34)
tmp87 = tmp84 + tmp86
tmp88 = tmp5 - tmp79
tmp89 = triton_helpers.maximum(tmp88, tmp34)
tmp90 = tmp87 + tmp89
tmp91 = tmp90 - tmp9
tmp92 = tmp91 * tmp57
tmp93 = tmp92 >= tmp34
tmp94 = tl.where(tmp93, tmp79, tmp77)
tmp95 = tmp78 * tmp12
tmp96 = tmp94 + tmp95
tmp97 = tmp0 - tmp96
tmp98 = triton_helpers.maximum(tmp97, tmp34)
tmp99 = tmp1 - tmp96
tmp100 = triton_helpers.maximum(tmp99, tmp34)
tmp101 = tmp98 + tmp100
tmp102 = tmp3 - tmp96
tmp103 = triton_helpers.maximum(tmp102, tmp34)
tmp104 = tmp101 + tmp103
tmp105 = tmp5 - tmp96
tmp106 = triton_helpers.maximum(tmp105, tmp34)
tmp107 = tmp104 + tmp106
tmp108 = tmp107 - tmp9
tmp109 = tmp108 * tmp57
tmp110 = tmp109 >= tmp34
tmp111 = tl.where(tmp110, tmp96, tmp94)
tmp112 = tmp95 * tmp12
tmp113 = tmp111 + tmp112
tmp114 = tmp0 - tmp113
tmp115 = triton_helpers.maximum(tmp114, tmp34)
tmp116 = tmp1 - tmp113
tmp117 = triton_helpers.maximum(tmp116, tmp34)
tmp118 = tmp115 + tmp117
tmp119 = tmp3 - tmp113
tmp120 = triton_helpers.maximum(tmp119, tmp34)
tmp121 = tmp118 + tmp120
tmp122 = tmp5 - tmp113
tmp123 = triton_helpers.maximum(tmp122, tmp34)
tmp124 = tmp121 + tmp123
tmp125 = tmp124 - tmp9
tmp126 = tmp125 * tmp57
tmp127 = tmp126 >= tmp34
tmp128 = tl.where(tmp127, tmp113, tmp111)
tmp129 = tmp112 * tmp12
tmp130 = tmp128 + tmp129
tmp131 = tmp0 - tmp130
tmp132 = triton_helpers.maximum(tmp131, tmp34)
tmp133 = tmp1 - tmp130
tmp134 = triton_helpers.maximum(tmp133, tmp34)
tmp135 = tmp132 + tmp134
tmp136 = tmp3 - tmp130
tmp137 = triton_helpers.maximum(tmp136, tmp34)
tmp138 = tmp135 + tmp137
tmp139 = tmp5 - tmp130
tmp140 = triton_helpers.maximum(tmp139, tmp34)
tmp141 = tmp138 + tmp140
tmp142 = tmp141 - tmp9
tmp143 = tmp142 * tmp57
tmp144 = tmp143 >= tmp34
tmp145 = tl.where(tmp144, tmp130, tmp128)
tmp146 = tmp129 * tmp12
tmp147 = tmp145 + tmp146
tmp148 = tmp0 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp34)
tmp150 = tmp1 - tmp147
tmp151 = triton_helpers.maximum(tmp150, tmp34)
tmp152 = tmp149 + tmp151
tmp153 = tmp3 - tmp147
tmp154 = triton_helpers.maximum(tmp153, tmp34)
tmp155 = tmp152 + tmp154
tmp156 = tmp5 - tmp147
tmp157 = triton_helpers.maximum(tmp156, tmp34)
tmp158 = tmp155 + tmp157
tmp159 = tmp158 - tmp9
tmp160 = tmp159 * tmp57
tmp161 = tmp160 >= tmp34
tmp162 = tl.where(tmp161, tmp147, tmp145)
tmp163 = tmp146 * tmp12
tmp164 = tmp162 + tmp163
tmp165 = tmp0 - tmp164
tmp166 = triton_helpers.maximum(tmp165, tmp34)
tmp167 = tmp1 - tmp164
tmp168 = triton_helpers.maximum(tmp167, tmp34)
tmp169 = tmp166 + tmp168
tmp170 = tmp3 - tmp164
tmp171 = triton_helpers.maximum(tmp170, tmp34)
tmp172 = tmp169 + tmp171
tmp173 = tmp5 - tmp164
tmp174 = triton_helpers.maximum(tmp173, tmp34)
tmp175 = tmp172 + tmp174
tmp176 = tmp175 - tmp9
tmp177 = tmp176 * tmp57
tmp178 = tmp177 >= tmp34
tmp179 = tl.where(tmp178, tmp164, tmp162)
tmp180 = tmp163 * tmp12
tmp181 = tmp179 + tmp180
tmp182 = tmp0 - tmp181
tmp183 = triton_helpers.maximum(tmp182, tmp34)
tmp184 = tmp1 - tmp181
tmp185 = triton_helpers.maximum(tmp184, tmp34)
tmp186 = tmp183 + tmp185
tmp187 = tmp3 - tmp181
tmp188 = triton_helpers.maximum(tmp187, tmp34)
tmp189 = tmp186 + tmp188
tmp190 = tmp5 - tmp181
tmp191 = triton_helpers.maximum(tmp190, tmp34)
tmp192 = tmp189 + tmp191
tmp193 = tmp192 - tmp9
tmp194 = tmp193 * tmp57
tmp195 = tmp194 >= tmp34
tmp196 = tl.where(tmp195, tmp181, tmp179)
tmp197 = tmp180 * tmp12
tmp198 = tmp196 + tmp197
tmp199 = tmp0 - tmp198
tmp200 = triton_helpers.maximum(tmp199, tmp34)
tmp201 = tmp1 - tmp198
tmp202 = triton_helpers.maximum(tmp201, tmp34)
tmp203 = tmp200 + tmp202
tmp204 = tmp3 - tmp198
tmp205 = triton_helpers.maximum(tmp204, tmp34)
tmp206 = tmp203 + tmp205
tmp207 = tmp5 - tmp198
tmp208 = triton_helpers.maximum(tmp207, tmp34)
tmp209 = tmp206 + tmp208
tmp210 = tmp209 - tmp9
tmp211 = tmp210 * tmp57
tmp212 = tmp211 >= tmp34
tmp213 = tl.where(tmp212, tmp198, tmp196)
tmp214 = tmp197 * tmp12
tmp215 = tmp213 + tmp214
tmp216 = tmp0 - tmp215
tmp217 = triton_helpers.maximum(tmp216, tmp34)
tmp218 = tmp1 - tmp215
tmp219 = triton_helpers.maximum(tmp218, tmp34)
tmp220 = tmp217 + tmp219
tmp221 = tmp3 - tmp215
tmp222 = triton_helpers.maximum(tmp221, tmp34)
tmp223 = tmp220 + tmp222
tmp224 = tmp5 - tmp215
tmp225 = triton_helpers.maximum(tmp224, tmp34)
tmp226 = tmp223 + tmp225
tmp227 = tmp226 - tmp9
tmp228 = tmp227 * tmp57
tmp229 = tmp228 >= tmp34
tmp230 = tl.where(tmp229, tmp215, tmp213)
tmp231 = tmp214 * tmp12
tmp232 = tmp230 + tmp231
tmp233 = tmp0 - tmp232
tmp234 = triton_helpers.maximum(tmp233, tmp34)
tmp235 = tmp1 - tmp232
tmp236 = triton_helpers.maximum(tmp235, tmp34)
tmp237 = tmp234 + tmp236
tmp238 = tmp3 - tmp232
tmp239 = triton_helpers.maximum(tmp238, tmp34)
tmp240 = tmp237 + tmp239
tmp241 = tmp5 - tmp232
tmp242 = triton_helpers.maximum(tmp241, tmp34)
tmp243 = tmp240 + tmp242
tmp244 = tmp243 - tmp9
tmp245 = tmp244 * tmp57
tmp246 = tmp245 >= tmp34
tmp247 = tl.where(tmp246, tmp232, tmp230)
tmp248 = tmp231 * tmp12
tmp249 = tmp247 + tmp248
tmp250 = tmp0 - tmp249
tmp251 = triton_helpers.maximum(tmp250, tmp34)
tmp252 = tmp1 - tmp249
tmp253 = triton_helpers.maximum(tmp252, tmp34)
tmp254 = tmp251 + tmp253
tmp255 = tmp3 - tmp249
tmp256 = triton_helpers.maximum(tmp255, tmp34)
tmp257 = tmp254 + tmp256
tmp258 = tmp5 - tmp249
tmp259 = triton_helpers.maximum(tmp258, tmp34)
tmp260 = tmp257 + tmp259
tmp261 = tmp260 - tmp9
tmp262 = tmp261 * tmp57
tmp263 = tmp262 >= tmp34
tmp264 = tl.where(tmp263, tmp249, tmp247)
tmp265 = tmp248 * tmp12
tmp266 = tmp264 + tmp265
tmp267 = tmp0 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp34)
tmp269 = tmp1 - tmp266
tmp270 = triton_helpers.maximum(tmp269, tmp34)
tmp271 = tmp268 + tmp270
tmp272 = tmp3 - tmp266
tmp273 = triton_helpers.maximum(tmp272, tmp34)
tmp274 = tmp271 + tmp273
tmp275 = tmp5 - tmp266
tmp276 = triton_helpers.maximum(tmp275, tmp34)
tmp277 = tmp274 + tmp276
tmp278 = tmp277 - tmp9
tmp279 = tmp278 * tmp57
tmp280 = tmp279 >= tmp34
tmp281 = tl.where(tmp280, tmp266, tmp264)
tmp282 = tmp265 * tmp12
tmp283 = tmp281 + tmp282
tmp284 = tmp0 - tmp283
tmp285 = triton_helpers.maximum(tmp284, tmp34)
tmp286 = tmp1 - tmp283
tmp287 = triton_helpers.maximum(tmp286, tmp34)
tmp288 = tmp285 + tmp287
tmp289 = tmp3 - tmp283
tmp290 = triton_helpers.maximum(tmp289, tmp34)
tmp291 = tmp288 + tmp290
tmp292 = tmp5 - tmp283
tmp293 = triton_helpers.maximum(tmp292, tmp34)
tmp294 = tmp291 + tmp293
tmp295 = tmp294 - tmp9
tmp296 = tmp295 * tmp57
tmp297 = tmp296 >= tmp34
tmp298 = tl.where(tmp297, tmp283, tmp281)
tmp299 = tmp282 * tmp12
tmp300 = tmp298 + tmp299
tmp301 = tmp0 - tmp300
tmp302 = triton_helpers.maximum(tmp301, tmp34)
tmp303 = tmp1 - tmp300
tmp304 = triton_helpers.maximum(tmp303, tmp34)
tmp305 = tmp302 + tmp304
tmp306 = tmp3 - tmp300
tmp307 = triton_helpers.maximum(tmp306, tmp34)
tmp308 = tmp305 + tmp307
tmp309 = tmp5 - tmp300
tmp310 = triton_helpers.maximum(tmp309, tmp34)
tmp311 = tmp308 + tmp310
tmp312 = tmp311 - tmp9
tmp313 = tmp312 * tmp57
tmp314 = tmp313 >= tmp34
tmp315 = tl.where(tmp314, tmp300, tmp298)
tmp316 = tmp299 * tmp12
tmp317 = tmp315 + tmp316
tmp318 = tmp0 - tmp317
tmp319 = triton_helpers.maximum(tmp318, tmp34)
tmp320 = tmp1 - tmp317
tmp321 = triton_helpers.maximum(tmp320, tmp34)
tmp322 = tmp319 + tmp321
tmp323 = tmp3 - tmp317
tmp324 = triton_helpers.maximum(tmp323, tmp34)
tmp325 = tmp322 + tmp324
tmp326 = tmp5 - tmp317
tmp327 = triton_helpers.maximum(tmp326, tmp34)
tmp328 = tmp325 + tmp327
tmp329 = tmp328 - tmp9
tmp330 = tmp329 * tmp57
tmp331 = tmp330 >= tmp34
tmp332 = tl.where(tmp331, tmp317, tmp315)
tmp333 = tmp316 * tmp12
tmp334 = tmp332 + tmp333
tmp335 = tmp0 - tmp334
tmp336 = triton_helpers.maximum(tmp335, tmp34)
tmp337 = tmp1 - tmp334
tmp338 = triton_helpers.maximum(tmp337, tmp34)
tmp339 = tmp336 + tmp338
tmp340 = tmp3 - tmp334
tmp341 = triton_helpers.maximum(tmp340, tmp34)
tmp342 = tmp339 + tmp341
tmp343 = tmp5 - tmp334
tmp344 = triton_helpers.maximum(tmp343, tmp34)
tmp345 = tmp342 + tmp344
tmp346 = tmp345 - tmp9
tmp347 = tmp346 * tmp57
tmp348 = tmp347 >= tmp34
tmp349 = tl.where(tmp348, tmp334, tmp332)
tmp350 = tmp333 * tmp12
tmp351 = tmp349 + tmp350
tmp352 = tmp0 - tmp351
tmp353 = triton_helpers.maximum(tmp352, tmp34)
tmp354 = tmp1 - tmp351
tmp355 = triton_helpers.maximum(tmp354, tmp34)
tmp356 = tmp353 + tmp355
tmp357 = tmp3 - tmp351
tmp358 = triton_helpers.maximum(tmp357, tmp34)
tmp359 = tmp356 + tmp358
tmp360 = tmp5 - tmp351
tmp361 = triton_helpers.maximum(tmp360, tmp34)
tmp362 = tmp359 + tmp361
tmp363 = tmp362 - tmp9
tmp364 = tmp363 * tmp57
tmp365 = tmp364 >= tmp34
tmp366 = tl.where(tmp365, tmp351, tmp349)
tmp367 = tmp350 * tmp12
tmp368 = tmp366 + tmp367
tmp369 = tmp0 - tmp368
tmp370 = triton_helpers.maximum(tmp369, tmp34)
tmp371 = tmp1 - tmp368
tmp372 = triton_helpers.maximum(tmp371, tmp34)
tmp373 = tmp370 + tmp372
tmp374 = tmp3 - tmp368
tmp375 = triton_helpers.maximum(tmp374, tmp34)
tmp376 = tmp373 + tmp375
tmp377 = tmp5 - tmp368
tmp378 = triton_helpers.maximum(tmp377, tmp34)
tmp379 = tmp376 + tmp378
tmp380 = tmp379 - tmp9
tmp381 = tmp380 * tmp57
tmp382 = tmp381 >= tmp34
tmp383 = tl.where(tmp382, tmp368, tmp366)
tmp384 = tmp367 * tmp12
tmp385 = tmp383 + tmp384
tmp386 = tmp0 - tmp385
tmp387 = triton_helpers.maximum(tmp386, tmp34)
tmp388 = tmp1 - tmp385
tmp389 = triton_helpers.maximum(tmp388, tmp34)
tmp390 = tmp387 + tmp389
tmp391 = tmp3 - tmp385
tmp392 = triton_helpers.maximum(tmp391, tmp34)
tmp393 = tmp390 + tmp392
tmp394 = tmp5 - tmp385
tmp395 = triton_helpers.maximum(tmp394, tmp34)
tmp396 = tmp393 + tmp395
tmp397 = tmp396 - tmp9
tmp398 = tmp397 * tmp57
tmp399 = tmp398 >= tmp34
tmp400 = tl.where(tmp399, tmp385, tmp383)
tmp401 = tmp384 * tmp12
tmp402 = tmp400 + tmp401
tmp403 = tmp0 - tmp402
tmp404 = triton_helpers.maximum(tmp403, tmp34)
tmp405 = tmp1 - tmp402
tmp406 = triton_helpers.maximum(tmp405, tmp34)
tmp407 = tmp404 + tmp406
tmp408 = tmp3 - tmp402
tmp409 = triton_helpers.maximum(tmp408, tmp34)
tmp410 = tmp407 + tmp409
tmp411 = tmp5 - tmp402
tmp412 = triton_helpers.maximum(tmp411, tmp34)
tmp413 = tmp410 + tmp412
tmp414 = tmp413 - tmp9
tmp415 = tmp414 * tmp57
tmp416 = tmp415 >= tmp34
tmp417 = tl.where(tmp416, tmp402, tmp400)
tmp418 = tmp401 * tmp12
tmp419 = tmp417 + tmp418
tmp420 = tmp0 - tmp419
tmp421 = triton_helpers.maximum(tmp420, tmp34)
tmp422 = tmp1 - tmp419
tmp423 = triton_helpers.maximum(tmp422, tmp34)
tmp424 = tmp421 + tmp423
tmp425 = tmp3 - tmp419
tmp426 = triton_helpers.maximum(tmp425, tmp34)
tmp427 = tmp424 + tmp426
tmp428 = tmp5 - tmp419
tmp429 = triton_helpers.maximum(tmp428, tmp34)
tmp430 = tmp427 + tmp429
tmp431 = tmp430 - tmp9
tmp432 = tmp431 * tmp57
tmp433 = tmp432 >= tmp34
tmp434 = tl.where(tmp433, tmp419, tmp417)
tl.store(out_ptr0 + x0, tmp30, xmask)
tl.store(in_out_ptr16 + x0, tmp434, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
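    # Evaluates the elementwise p = clamp(X - tau_m, min=0) at the next
    # bisection midpoint; the following kernel reduces it to f(tau_m).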
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp1 + tmp26
tmp28 = tmp0 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
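    # Sign test for one bisection step: f(tau_m) = sum(p) - 1 from the
    # previous kernel is compared against a recomputed f_lo, and tau
    # advances to the midpoint only when the signs match.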
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_out_ptr0 + x0, xmask)
tmp33 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp32 + tmp57
tmp59 = tl.where(tmp31, tmp58, tmp32)
tl.store(in_out_ptr0 + x0, tmp59, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp1 + tmp27
tmp29 = tmp0 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_out_ptr0 + x0, xmask)
tmp33 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp57 * tmp34
tmp59 = tmp32 + tmp58
tmp60 = tl.where(tmp31, tmp59, tmp32)
tl.store(in_out_ptr0 + x0, tmp60, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp1 + tmp28
tmp30 = tmp0 - tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_out_ptr0 + x0, xmask)
tmp37 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp36 + tmp63
tmp65 = tl.where(tmp35, tmp64, tmp36)
tl.store(in_out_ptr0 + x0, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp28 * tmp3
tmp30 = tmp1 + tmp29
tmp31 = tmp0 - tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9(in_out_ptr0,
in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr7, xnumel,
XBLOCK: tl.constexpr):
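    # Final fused block: performs the remaining sign tests in-register and
    # emits the refined tau (in_out_ptr2), the current halved bracket width
    # (out_ptr4), and the row sums used for the ensure_sum_one division
    # (out_ptr7).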
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_out_ptr0 + x0, xmask)
tmp37 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp63 * tmp38
tmp65 = tmp36 + tmp64
tmp66 = tl.where(tmp35, tmp65, tmp36)
tmp67 = tmp64 * tmp38
tmp68 = tmp66 + tmp67
tmp69 = tmp14 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = tmp15 - tmp68
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = tmp70 + tmp72
tmp74 = tmp17 - tmp68
tmp75 = triton_helpers.maximum(tmp74, tmp1)
tmp76 = tmp73 + tmp75
tmp77 = tmp19 - tmp68
tmp78 = triton_helpers.maximum(tmp77, tmp1)
tmp79 = tmp76 + tmp78
tmp80 = tmp79 - tmp12
tmp81 = tmp80 * tmp33
tmp82 = tmp81 >= tmp1
tmp83 = tl.where(tmp82, tmp68, tmp66)
tmp84 = tmp67 * tmp38
tmp85 = tmp83 + tmp84
tmp86 = tmp14 - tmp85
tmp87 = triton_helpers.maximum(tmp86, tmp1)
tmp88 = tmp15 - tmp85
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = tmp87 + tmp89
tmp91 = tmp17 - tmp85
tmp92 = triton_helpers.maximum(tmp91, tmp1)
tmp93 = tmp90 + tmp92
tmp94 = tmp19 - tmp85
tmp95 = triton_helpers.maximum(tmp94, tmp1)
tmp96 = tmp93 + tmp95
tmp97 = tmp96 - tmp12
tmp98 = tmp97 * tmp33
tmp99 = tmp98 >= tmp1
tmp100 = tl.where(tmp99, tmp85, tmp83)
tmp101 = tmp84 * tmp38
tmp102 = tmp100 + tmp101
tmp103 = tmp14 - tmp102
tmp104 = triton_helpers.maximum(tmp103, tmp1)
tmp105 = tmp15 - tmp102
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = tmp104 + tmp106
tmp108 = tmp17 - tmp102
tmp109 = triton_helpers.maximum(tmp108, tmp1)
tmp110 = tmp107 + tmp109
tmp111 = tmp19 - tmp102
tmp112 = triton_helpers.maximum(tmp111, tmp1)
tmp113 = tmp110 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp33
tmp116 = tmp115 >= tmp1
tmp117 = tl.where(tmp116, tmp102, tmp100)
tmp118 = tmp101 * tmp38
tmp119 = tmp117 + tmp118
tmp120 = tmp14 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp1)
tmp122 = tmp15 - tmp119
tmp123 = triton_helpers.maximum(tmp122, tmp1)
tmp124 = tmp121 + tmp123
tmp125 = tmp17 - tmp119
tmp126 = triton_helpers.maximum(tmp125, tmp1)
tmp127 = tmp124 + tmp126
tmp128 = tmp19 - tmp119
tmp129 = triton_helpers.maximum(tmp128, tmp1)
tmp130 = tmp127 + tmp129
tmp131 = tmp130 - tmp12
tmp132 = tmp131 * tmp33
tmp133 = tmp132 >= tmp1
tmp134 = tl.where(tmp133, tmp119, tmp117)
tmp135 = tmp118 * tmp38
tmp136 = tmp134 + tmp135
tmp137 = tmp14 - tmp136
tmp138 = triton_helpers.maximum(tmp137, tmp1)
tmp139 = tmp15 - tmp136
tmp140 = triton_helpers.maximum(tmp139, tmp1)
tmp141 = tmp138 + tmp140
tmp142 = tmp17 - tmp136
tmp143 = triton_helpers.maximum(tmp142, tmp1)
tmp144 = tmp141 + tmp143
tmp145 = tmp19 - tmp136
tmp146 = triton_helpers.maximum(tmp145, tmp1)
tmp147 = tmp144 + tmp146
tmp148 = tmp147 - tmp12
tmp149 = tmp148 * tmp33
tmp150 = tmp149 >= tmp1
tmp151 = tl.where(tmp150, tmp136, tmp134)
tmp152 = tmp135 * tmp38
tmp153 = tmp151 + tmp152
tmp154 = tmp14 - tmp153
tmp155 = triton_helpers.maximum(tmp154, tmp1)
tmp156 = tmp15 - tmp153
tmp157 = triton_helpers.maximum(tmp156, tmp1)
tmp158 = tmp155 + tmp157
tmp159 = tmp17 - tmp153
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = tmp158 + tmp160
tmp162 = tmp19 - tmp153
tmp163 = triton_helpers.maximum(tmp162, tmp1)
tmp164 = tmp161 + tmp163
tl.store(out_ptr4 + x0, tmp101, xmask)
tl.store(in_out_ptr2 + x0, tmp151, xmask)
tl.store(out_ptr7 + x0, tmp164, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
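    # Output kernel: p = clamp(X - tau, min=0) / sum(p), i.e. the final
    # projection with ensure_sum_one applied.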
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp1 + tmp6
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp12 = tmp10 / tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
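    # Orchestrates the unrolled bisection: the kernels alternate between
    # refining tau per row and re-evaluating p, with the last kernel writing
    # the normalized (4, 4, 4, 4) projection into buf106.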
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf40
get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0[grid(64)](buf41,
arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf82 = reinterpret_tensor(buf81, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf81
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1[grid(64)](buf82,
arg0_1, buf41, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf83 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_sub_2[grid(256)](arg0_1, buf82,
buf42, buf83, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf85 = buf82
del buf82
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3[grid(64)](buf85,
buf83, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf86 = buf83
del buf83
triton_poi_fused_add_clamp_div_sub_4[grid(256)](arg0_1, buf85,
buf42, buf86, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf88 = buf85
del buf85
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5[grid(64)](buf88,
buf86, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf89 = buf86
del buf86
triton_poi_fused_add_div_sub_6[grid(256)](arg0_1, buf88, buf42,
buf89, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf91 = buf88
del buf88
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7[grid(64)](buf91,
buf89, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf92 = buf89
del buf89
triton_poi_fused_add_div_sub_8[grid(256)](arg0_1, buf91, buf42,
buf92, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf94 = buf91
del buf91
buf100 = buf41
del buf41
buf103 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf104 = reinterpret_tensor(buf103, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf103
buf105 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9[grid(64)](buf94,
buf104, buf92, arg0_1, buf42, buf100, buf105, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf42
del buf94
buf106 = buf92
del buf92
triton_poi_fused_add_clamp_div_sub_10[grid(256)](arg0_1, buf104,
buf100, buf105, buf106, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf100
del buf104
del buf105
return buf106,
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
"""sparsemax: normalizing sparse transform (a la softmax), via bisection.
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If false, the result might
        sum to a value close to, but not exactly, 1, which can cause
        downstream problems.
Note: This function does not yet support normalizing along anything except
the last dimension. Please use transposing and views to achieve more
general behavior.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True
):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
max_val, _ = X.max(dim=dim, keepdim=True)
X = X * (alpha - 1)
max_val = max_val * (alpha - 1)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
class SparsemaxBisectFunction(EntmaxBisectFunction):
@classmethod
def _gp(cls, x, alpha):
return x
@classmethod
def _gp_inv(cls, y, alpha):
return y
@classmethod
def _p(cls, x, alpha):
return torch.clamp(x, min=0)
@classmethod
def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
        return super().forward(ctx, X, alpha=2, dim=dim, n_iter=n_iter,
            ensure_sum_one=ensure_sum_one)
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y > 0
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None, None
class SparsemaxBisectNew(nn.Module):
def __init__(self, dim=-1, n_iter=None):
"""sparsemax: normalizing sparse transform (a la softmax) via bisection
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
"""
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
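# Hedged equivalence check (not part of the generated file): the compiled
# wrapper was traced for a fixed (4, 4, 4, 4) CUDA input and should match
# the eager bisection on that shape; requires a CUDA device.
def _compiled_vs_eager_demo():
    X = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(sparsemax_bisect(X), SparsemaxBisectNew()(X),
        atol=1e-5)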
| cifkao/entmax | SparsemaxBisect | false | 15,042 | [
"MIT"
]
| 298 | f18bab9318f9d2471a36545ee0b4c97be6d48a87 | https://github.com/cifkao/entmax/tree/f18bab9318f9d2471a36545ee0b4c97be6d48a87 |
AttentionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cw/ccwt32zwy5mh42zsrjbrhh5m7edzp5o4jq2ybntxox3s6bfqnvan.py
# Topologically Sorted Source Nodes: [global_context, transformed_global], Original ATen: [aten.mean, aten.tanh, aten.tanh_backward]
# Source node to ATen node mapping:
# global_context => mean
# transformed_global => tanh
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_1, [1]), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%mean,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_2), kwargs = {})
triton_poi_fused_mean_tanh_tanh_backward_0 = async_compile.triton('triton_poi_fused_mean_tanh_tanh_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_tanh_tanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_tanh_tanh_backward_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = tmp9 * tmp9
tmp11 = 1.0
tmp12 = tmp11 - tmp10
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bp/cbpvdw3mp6xeofkt2trxpj2olsonpd5mtuzpi75qzm34orwv4vbo.py
# Topologically Sorted Source Nodes: [sigmoid_scores], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid_scores => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%bmm,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [global_context, transformed_global], Original ATen: [aten.mean, aten.tanh, aten.tanh_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_tanh_tanh_backward_0.run(buf0, buf1, buf5, 16, grid=grid(16), stream=stream0)
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 0), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [sigmoid_scores], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf3, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [representation], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf3, out=buf4)
return (buf4, buf3, primals_1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
class AttentionModule(torch.nn.Module):
"""
SimGNN Attention Module to make a pass on graph.
"""
def __init__(self, args):
"""
:param args: Arguments object.
"""
super(AttentionModule, self).__init__()
self.args = args
self.setup_weights()
self.init_parameters()
def setup_weights(self):
"""
Defining weights.
"""
self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.args.
filters_3, self.args.filters_3))
def init_parameters(self):
"""
Initializing weights.
"""
torch.nn.init.xavier_uniform_(self.weight_matrix)
def forward(self, embedding):
"""
Making a forward propagation pass to create a graph level representation.
:param embedding: Result of the GCN.
:return representation: A graph level representation vector.
"""
batch_size = embedding.shape[0]
global_context = torch.mean(torch.matmul(embedding, self.
weight_matrix), dim=1)
transformed_global = torch.tanh(global_context)
sigmoid_scores = torch.sigmoid(torch.matmul(embedding,
transformed_global.view(batch_size, -1, 1)))
representation = torch.matmul(embedding.permute(0, 2, 1),
sigmoid_scores)
return representation, sigmoid_scores
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(filters_3=4)}]
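# A minimal usage sketch, assuming the toy shapes from get_inputs /
# get_init_inputs above; the `sketch_` names are hypothetical:
sketch_model = AttentionModule(_mock_config(filters_3=4))
sketch_embedding = torch.rand([4, 4, 4])  # (batch, nodes, filters_3)
sketch_repr, sketch_scores = sketch_model(sketch_embedding)
# sketch_repr: (4, 4, 1) graph-level vectors; sketch_scores: (4, 4, 1) per-node weights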
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
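# Fused kernel: for each (batch, feature) pair it sums the four node rows,
# divides by 4.0 (the mean over dim=1), applies tanh (stored to out_ptr0), and
# also stores 1 - tanh(x)^2 to out_ptr1 -- the tanh derivative that the
# backward pass reuses.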
@triton.jit
def triton_poi_fused_mean_tanh_tanh_backward_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = tmp9 * tmp9
tmp11 = 1.0
tmp12 = tmp11 - tmp10
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
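# In-place elementwise sigmoid over the 16 attention logits (4 graphs x 4 nodes).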
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_tanh_tanh_backward_0[grid(16)](buf0, buf1,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(buf1, (4, 4, 1), (
4, 1, 0), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_sigmoid_1[grid(16)](buf3, 16, XBLOCK=16, num_warps
=1, num_stages=1)
buf4 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 4), (16, 1,
4), 0), buf3, out=buf4)
return buf4, buf3, primals_1, buf3, buf5
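# call() returns (representation, sigmoid_scores, ...); the trailing entries
# (primals_1, buf3 again, buf5) are tensors saved for the backward pass, and
# the module wrapper below surfaces only the first two.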
class AttentionModuleNew(torch.nn.Module):
"""
SimGNN Attention Module to make an attention pass on the graph.
"""
def __init__(self, args):
"""
:param args: Arguments object.
"""
super(AttentionModuleNew, self).__init__()
self.args = args
self.setup_weights()
self.init_parameters()
def setup_weights(self):
"""
Defining weights.
"""
self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.args.filters_3, self.args.filters_3))
def init_parameters(self):
"""
Initializing weights.
"""
torch.nn.init.xavier_uniform_(self.weight_matrix)
def forward(self, input_0):
primals_2 = self.weight_matrix
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| cloudcjf/SG_PR | AttentionModule | false | 15,043 | [
"MIT"
]
| 105 | 1339d00811ea3c4c18963efa24bf6fc778e15794 | https://github.com/cloudcjf/SG_PR/tree/1339d00811ea3c4c18963efa24bf6fc778e15794 |
GraphConvolution | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wv/cwvsxo4q6wyoxpozsubbimmg6xvl34ow44hy6yl5mwa23uuy77sa.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
del buf0
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
return (buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import Parameter
from torch.nn import functional as F
import torch.multiprocessing
import torch.utils.data
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch.nn.modules.loss
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 (a usage sketch follows the helper functions below).
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight)
def forward(self, input, adj):
input = F.dropout(input, self.dropout, self.training)
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
output = self.act(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
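# A minimal usage sketch, assuming the dense toy 4x4 inputs from get_inputs
# above; in practice `adj` is a normalized (often sparse) adjacency matrix,
# and the `sketch_` names are hypothetical:
sketch_layer = GraphConvolution(in_features=4, out_features=4)
sketch_x, sketch_adj = torch.rand([4, 4]), torch.rand([4, 4])
sketch_out = sketch_layer(sketch_x, sketch_adj)  # relu(adj @ (x @ W)), shape (4, 4)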
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch.nn import Parameter
from torch.nn import functional as F
import torch.multiprocessing
import torch.utils.data
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch.nn.modules.loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
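# Fused kernel: computes relu(x) in place and stores the boolean mask
# relu(x) <= 0 that autograd's threshold_backward step consumes.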
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
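# call() returns (output, relu_mask, primals_3^T view, primals_1^T view); the
# trailing entries are saved for the backward pass, and the module wrapper
# below surfaces only output[0].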
class GraphConvolutionNew(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.0, act=F.relu):
super(GraphConvolutionNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.act = act
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| cminusQAQ/graph4nlp | GraphConvolution | false | 15,044 | [
"Apache-2.0"
]
| 1,269 | d980e897131f1b9d3766750c06316d94749904fa | https://github.com/cminusQAQ/graph4nlp/tree/d980e897131f1b9d3766750c06316d94749904fa |