| entry_point (string, len 1-65) | original_triton_code (string, len 4.5k-619k) | python_code (string, len 208-60.9k) | triton_code (string, len 1.15k-275k) | repo_name (string, len 7-115) | module_name (string, len 1-65) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (sequence, len 1-6) | stars (int64, 0-19.8k) | sha (string, len 40) | repo_link (string, len 72-180) | pytorch_code (string, len 200-4.05k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
JSCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wv/cwvti54lsojjpkh6f73xsvv55wjtno2rrmtjgaznlsjst37yn74a.py
# Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax_1 => amax_3, sub_5
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_3), kwargs = {})
triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ew/cewejal4rpvwi6jnzulqnyb3ftv6rl774lzryrdhbo5q76xi6iai.py
# Topologically Sorted Source Nodes: [softmax, softmax_1, m, m_1, kl_div, log_softmax, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.add, aten.mul, aten.xlogy, aten._log_softmax, aten.sub, aten.sum, aten.div]
# Source node to ATen node mapping:
# kl_div => div_2, eq, full_default, full_default_1, isnan, log_1, mul_1, mul_2, sub_4, sum_4, where, where_1
# kl_div_1 => div_3, eq_1, full_default_2, full_default_3, isnan_1, log_3, mul_3, mul_4, sub_7, sum_6, where_2, where_3
# log_softmax => exp_2, log, sub_3, sum_3
# log_softmax_1 => exp_3, log_2, sub_6, sum_5
# loss => add_1
# loss_1 => mul_5
# m => add
# m_1 => mul
# softmax => div, sum_1
# softmax_1 => div_1, sum_2
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %mul : [num_users=10] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%mul,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%mul, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub_3), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_4,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%mul,), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%mul, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log_3), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_5,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [-1], True), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_5,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %log_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_3), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_7,), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, %div_3), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {})
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1 = async_compile.triton('triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp46 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp10 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp48 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp49 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp9 / tmp16
tmp18 = tmp8 + tmp17
tmp19 = 0.5
tmp20 = tmp18 * tmp19
tmp21 = libdevice.isnan(tmp20).to(tl.int1)
tmp22 = 0.0
tmp23 = tmp20 == tmp22
tmp24 = tl_math.log(tmp20)
tmp25 = tmp20 * tmp24
tmp26 = tl.where(tmp23, tmp22, tmp25)
tmp27 = float("nan")
tmp28 = tl.where(tmp21, tmp27, tmp26)
tmp31 = tl_math.exp(tmp30)
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tl_math.log(tmp40)
tmp42 = tmp29 - tmp41
tmp43 = tmp20 * tmp42
tmp44 = tmp28 - tmp43
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = _tmp46 + tmp45
_tmp46 = tl.where(rmask, tmp47, _tmp46)
tmp50 = tl_math.exp(tmp49)
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp50 + tmp52
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp60 = tl_math.log(tmp59)
tmp61 = tmp48 - tmp60
tmp62 = tmp20 * tmp61
tmp63 = tmp28 - tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = _tmp65 + tmp64
_tmp65 = tl.where(rmask, tmp66, _tmp65)
tmp46 = tl.sum(_tmp46, 1)[:, None]
tmp65 = tl.sum(_tmp65, 1)[:, None]
tmp67 = 0.25
tmp68 = tmp46 * tmp67
tmp69 = tmp65 * tmp67
tmp70 = tmp68 + tmp69
tmp71 = 1.0
tmp72 = tmp70 * tmp71
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp72, None)
''', device_str='cuda')
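# Editorial note (not Inductor output): in the reduction kernel above, the
# constant 0.25 is 1/4, the 'batchmean' division by the leading batch
# dimension of the (4, 4, 4, 4) inputs, and the final multiply by 1.0
# applies the criterion's alpha weight from the PyTorch source below.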
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0.run(arg1_1, buf0, buf5, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1, log_softmax], Original ATen: [aten._softmax, aten._log_softmax]
triton_poi_fused__log_softmax__softmax_0.run(arg0_1, buf1, buf3, 256, grid=grid(256), stream=stream0)
del arg0_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [softmax, softmax_1, m, m_1, kl_div, log_softmax, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.add, aten.mul, aten.xlogy, aten._log_softmax, aten.sub, aten.sum, aten.div]
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1.run(buf7, buf0, buf1, buf3, buf5, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf3
del buf5
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class JSCriterion(Criterion):
def __init__(self, alpha=1.0, name='JS Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1,
reduction='batchmean'):
"""input/target: logits
"""
input = input.float()
target = target.float()
m = F.softmax(target.detach(), dim=-1, dtype=torch.float32
) + F.softmax(input.detach(), dim=-1, dtype=torch.float32)
m = 0.5 * m
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
m, reduction=reduction) + F.kl_div(F.log_softmax(target, dim=-1,
dtype=torch.float32), m, reduction=reduction)
loss = loss * self.alpha
return loss
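# Editorial sketch (hypothetical helper, not part of the original record):
# F.kl_div(log_q, m) computes sum(m * (log(m) - log_q)), i.e. KL(m || q),
# so the loss above is alpha * (KL(m || softmax(input)) + KL(m || softmax(target)))
# with m = 0.5 * (softmax(input) + softmax(target)), a Jensen-Shannon variant.
def _js_smoke_test():
    # For random logits the loss should be a finite, non-negative scalar.
    crit = JSCriterion()
    loss = crit(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert loss.dim() == 0 and torch.isfinite(loss) and loss >= 0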
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
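# Editorial note: the kernel above applies the standard max-subtraction
# trick; out_ptr0 receives exp(x - max(x)) (the unnormalized softmax) and
# out_ptr1 the shifted logits x - max(x) reused by the log_softmax path,
# keeping exp() within a safe floating-point range.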
@triton.jit
def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp46 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp10 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp30 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp48 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp49 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp9 / tmp16
tmp18 = tmp8 + tmp17
tmp19 = 0.5
tmp20 = tmp18 * tmp19
tmp21 = libdevice.isnan(tmp20).to(tl.int1)
tmp22 = 0.0
tmp23 = tmp20 == tmp22
tmp24 = tl_math.log(tmp20)
tmp25 = tmp20 * tmp24
tmp26 = tl.where(tmp23, tmp22, tmp25)
tmp27 = float('nan')
tmp28 = tl.where(tmp21, tmp27, tmp26)
tmp31 = tl_math.exp(tmp30)
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tl_math.log(tmp40)
tmp42 = tmp29 - tmp41
tmp43 = tmp20 * tmp42
tmp44 = tmp28 - tmp43
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = _tmp46 + tmp45
_tmp46 = tl.where(rmask, tmp47, _tmp46)
tmp50 = tl_math.exp(tmp49)
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp50 + tmp52
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp60 = tl_math.log(tmp59)
tmp61 = tmp48 - tmp60
tmp62 = tmp20 * tmp61
tmp63 = tmp28 - tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = _tmp65 + tmp64
_tmp65 = tl.where(rmask, tmp66, _tmp65)
tmp46 = tl.sum(_tmp46, 1)[:, None]
tmp65 = tl.sum(_tmp65, 1)[:, None]
tmp67 = 0.25
tmp68 = tmp46 * tmp67
tmp69 = tmp65 * tmp67
tmp70 = tmp68 + tmp69
tmp71 = 1.0
tmp72 = tmp70 * tmp71
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp72, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf1,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf7 = buf4
del buf4
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1[grid
(1)](buf7, buf0, buf1, buf3, buf5, 1, 256, XBLOCK=1, RBLOCK=256,
num_warps=8, num_stages=1)
del buf0
del buf1
del buf3
del buf5
return buf7,
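# Editorial note: buf7 aliases buf4, so the scalar result is accumulated in
# place by the reduction kernel through its mutated in_out_ptr0 argument.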
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class JSCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='JS Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kiminh/mt-dnn | JSCriterion | false | 7,033 | ["MIT"] | 1 | 133884b380244dbe74acc4d7507e551b2c5035b3 | https://github.com/kiminh/mt-dnn/tree/133884b380244dbe74acc4d7507e551b2c5035b3 | import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class Model(Criterion):
def __init__(self, alpha=1.0, name='JS Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1,
reduction='batchmean'):
"""input/target: logits
"""
input = input.float()
target = target.float()
m = F.softmax(target.detach(), dim=-1, dtype=torch.float32
) + F.softmax(input.detach(), dim=-1, dtype=torch.float32)
m = 0.5 * m
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
m, reduction=reduction) + F.kl_div(F.log_softmax(target, dim=-1,
dtype=torch.float32), m, reduction=reduction)
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
NsSymKlCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/m3/cm3oyfplxv4jfq4lzivy2ytiixh7r3jdutou2fi3p7bnl5g734sl.py
# Topologically Sorted Source Nodes: [log_softmax, log_softmax_3], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# log_softmax_3 => amax_3, sub_9
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax_3), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dc/cdcsseiicjcgc6sqlyyouett3fn2xszalsbnt3viybno45khl7l3.py
# Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, truediv_2, log_softmax_2, p_1, add_4, truediv_3, sub_3, add_5, log_2, rp_1, log_softmax_3, y_1, add_6, truediv_4, sub_4, add_7, log_3, ry_1, sub_5, mul_2, mul_3, sum_2, truediv_5, loss, loss_1], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# add_7 => add_7
# log => log_2
# log_1 => log_3
# log_2 => log_6
# log_3 => log_7
# log_softmax => exp, log, sub_1, sum_1
# log_softmax_1 => exp_2, log_1, sub_3, sum_2
# log_softmax_2 => exp_4, log_4, sub_8, sum_4
# log_softmax_3 => exp_6, log_5, sub_10, sum_5
# loss => add_8
# loss_1 => mul_8
# mul => mul_2
# mul_1 => mul_3
# mul_2 => mul_6
# mul_3 => mul_7
# p => exp_1
# p_1 => exp_5
# rp => neg
# rp_1 => neg_2
# ry => neg_1
# ry_1 => neg_3
# sub => sub_4
# sub_1 => sub_5
# sub_2 => sub_6
# sub_3 => sub_11
# sub_4 => sub_12
# sub_5 => sub_13
# sum_1 => sum_3
# sum_2 => sum_6
# truediv => mul, reciprocal
# truediv_1 => mul_1, reciprocal_1
# truediv_2 => div
# truediv_3 => mul_4, reciprocal_2
# truediv_4 => mul_5, reciprocal_3
# truediv_5 => div_1
# y => exp_3
# y_1 => exp_7
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1e-06), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_4, 1e-06), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_2,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log_1), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_3, 1e-06), kwargs = {})
# %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1.0), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, 1e-06), kwargs = {})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_3,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg, %neg_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_1, %sub_6), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 64), kwargs = {})
# %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_7,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [1], True), kwargs = {})
# %log_4 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_7, %log_4), kwargs = {})
# %exp_5 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_8,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_5, 1e-06), kwargs = {})
# %reciprocal_2 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_4,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_2, 1.0), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, 1), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_11, 1e-06), kwargs = {})
# %log_6 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_5,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_6,), kwargs = {})
# %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_9,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_6, [1], True), kwargs = {})
# %log_5 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_5,), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_9, %log_5), kwargs = {})
# %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_10,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_7, 1e-06), kwargs = {})
# %reciprocal_3 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_6,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_3, 1.0), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 1), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_12, 1e-06), kwargs = {})
# %log_7 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_7,), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_7,), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %neg_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_5, %sub_13), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, 2), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_7,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 64), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 1.0), kwargs = {})
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1 = async_compile.triton('triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp102 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp25 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp26 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp54 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp55 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp60 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp63 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp77 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp79 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp82 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp85 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1, 1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tmp21 = tmp20 - tmp19
tmp22 = tmp21 + tmp15
tmp23 = tl_math.log(tmp22)
tmp24 = -tmp23
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 + tmp15
tmp41 = tmp17 / tmp40
tmp42 = tmp41 * tmp19
tmp43 = tmp42 - tmp19
tmp44 = tmp43 + tmp15
tmp45 = tl_math.log(tmp44)
tmp46 = -tmp45
tmp47 = tmp24 - tmp46
tmp48 = tmp14 * tmp47
tmp49 = 2.0
tmp50 = tmp48 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = _tmp52 + tmp51
_tmp52 = tl.where(rmask, tmp53, _tmp52)
tmp56 = tl_math.exp(tmp55)
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp62 + tmp64
tmp66 = tl_math.log(tmp65)
tmp67 = tmp54 - tmp66
tmp68 = tl_math.exp(tmp67)
tmp69 = tmp68 + tmp15
tmp70 = tmp17 / tmp69
tmp71 = tmp70 * tmp19
tmp72 = tmp71 - tmp19
tmp73 = tmp72 + tmp15
tmp74 = tl_math.log(tmp73)
tmp75 = -tmp74
tmp78 = tl_math.exp(tmp77)
tmp80 = tl_math.exp(tmp79)
tmp81 = tmp78 + tmp80
tmp83 = tl_math.exp(tmp82)
tmp84 = tmp81 + tmp83
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp84 + tmp86
tmp88 = tl_math.log(tmp87)
tmp89 = tmp76 - tmp88
tmp90 = tl_math.exp(tmp89)
tmp91 = tmp90 + tmp15
tmp92 = tmp17 / tmp91
tmp93 = tmp92 * tmp19
tmp94 = tmp93 - tmp19
tmp95 = tmp94 + tmp15
tmp96 = tl_math.log(tmp95)
tmp97 = -tmp96
tmp98 = tmp75 - tmp97
tmp99 = tmp68 * tmp98
tmp100 = tmp99 * tmp49
tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK])
tmp103 = _tmp102 + tmp101
_tmp102 = tl.where(rmask, tmp103, _tmp102)
tmp52 = tl.sum(_tmp52, 1)[:, None]
tmp102 = tl.sum(_tmp102, 1)[:, None]
tmp104 = 0.015625
tmp105 = tmp52 * tmp104
tmp106 = tmp102 * tmp104
tmp107 = tmp105 + tmp106
tmp108 = 1.0
tmp109 = tmp107 * tmp108
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp109, None)
''', device_str='cuda')
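# Editorial note (not Inductor output): here 0.015625 is 1/64, matching the
# division by bs = 64 rows after stable_kl flattens the (4, 4, 4, 4) inputs
# to shape (64, 4); the final multiply by 1.0 applies alpha.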
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax, log_softmax_3], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, buf7, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax_1, log_softmax_2], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_0.run(arg1_1, buf2, buf5, 256, grid=grid(256), stream=stream0)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf10 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, truediv_2, log_softmax_2, p_1, add_4, truediv_3, sub_3, add_5, log_2, rp_1, log_softmax_3, y_1, add_6, truediv_4, sub_4, add_7, log_3, ry_1, sub_5, mul_2, mul_3, sum_2, truediv_5, loss, loss_1], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div]
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1.run(buf10, buf0, buf2, buf5, buf7, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf2
del buf5
del buf7
return (buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
def stable_kl(logit, target, epsilon=1e-06, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
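# Editorial sketch (hypothetical helper, not part of the original record):
# as epsilon -> 0, -log(1/p - 1 + epsilon) -> log(p / (1 - p)), the logit
# of p, so the summand is roughly p * (logit(p) - logit(y)) * 2; the epsilon
# offsets keep the logs finite as probabilities approach 0 or 1.
def _stable_kl_example():
    # Identical logits should give (numerically) zero divergence.
    z = torch.rand(8, 4)
    assert stable_kl(z, z.clone()).abs().item() < 1e-5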
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class NsSymKlCriterion(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits
"""
input = input.float()
target = target.float()
loss = stable_kl(input, target.detach()) + stable_kl(target, input.
detach())
loss = loss * self.alpha
return loss
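# Editorial sketch (hypothetical helper, not part of the original record):
# each stable_kl term detaches the opposite argument, so gradients reach
# both inputs while each KL direction treats the other side as constant.
def _sym_kl_smoke_test():
    crit = NsSymKlCriterion()
    loss = crit(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert loss.dim() == 0 and torch.isfinite(loss)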
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp102 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp26 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp55 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp60 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp63 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp76 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp77 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp79 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp82 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp85 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1, 1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tmp21 = tmp20 - tmp19
tmp22 = tmp21 + tmp15
tmp23 = tl_math.log(tmp22)
tmp24 = -tmp23
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 + tmp15
tmp41 = tmp17 / tmp40
tmp42 = tmp41 * tmp19
tmp43 = tmp42 - tmp19
tmp44 = tmp43 + tmp15
tmp45 = tl_math.log(tmp44)
tmp46 = -tmp45
tmp47 = tmp24 - tmp46
tmp48 = tmp14 * tmp47
tmp49 = 2.0
tmp50 = tmp48 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = _tmp52 + tmp51
_tmp52 = tl.where(rmask, tmp53, _tmp52)
tmp56 = tl_math.exp(tmp55)
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp62 + tmp64
tmp66 = tl_math.log(tmp65)
tmp67 = tmp54 - tmp66
tmp68 = tl_math.exp(tmp67)
tmp69 = tmp68 + tmp15
tmp70 = tmp17 / tmp69
tmp71 = tmp70 * tmp19
tmp72 = tmp71 - tmp19
tmp73 = tmp72 + tmp15
tmp74 = tl_math.log(tmp73)
tmp75 = -tmp74
tmp78 = tl_math.exp(tmp77)
tmp80 = tl_math.exp(tmp79)
tmp81 = tmp78 + tmp80
tmp83 = tl_math.exp(tmp82)
tmp84 = tmp81 + tmp83
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp84 + tmp86
tmp88 = tl_math.log(tmp87)
tmp89 = tmp76 - tmp88
tmp90 = tl_math.exp(tmp89)
tmp91 = tmp90 + tmp15
tmp92 = tmp17 / tmp91
tmp93 = tmp92 * tmp19
tmp94 = tmp93 - tmp19
tmp95 = tmp94 + tmp15
tmp96 = tl_math.log(tmp95)
tmp97 = -tmp96
tmp98 = tmp75 - tmp97
tmp99 = tmp68 * tmp98
tmp100 = tmp99 * tmp49
tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK])
tmp103 = _tmp102 + tmp101
_tmp102 = tl.where(rmask, tmp103, _tmp102)
tmp52 = tl.sum(_tmp52, 1)[:, None]
tmp102 = tl.sum(_tmp102, 1)[:, None]
tmp104 = 0.015625
tmp105 = tmp52 * tmp104
tmp106 = tmp102 * tmp104
tmp107 = tmp105 + tmp106
tmp108 = 1.0
tmp109 = tmp107 * tmp108
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp109, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, buf7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf10 = buf4
del buf4
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1[
grid(1)](buf10, buf0, buf2, buf5, buf7, 1, 256, XBLOCK=1,
RBLOCK=256, num_warps=8, num_stages=1)
del buf0
del buf2
del buf5
del buf7
return buf10,
def stable_kl(logit, target, epsilon=1e-06, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class NsSymKlCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kiminh/mt-dnn | NsSymKlCriterion | false | 7,034 | ["MIT"] | 1 | 133884b380244dbe74acc4d7507e551b2c5035b3 | https://github.com/kiminh/mt-dnn/tree/133884b380244dbe74acc4d7507e551b2c5035b3 | import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
def stable_kl(logit, target, epsilon=1e-06, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class Model(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits
"""
input = input.float()
target = target.float()
loss = stable_kl(input, target.detach()) + stable_kl(target, input.detach())
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
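For reference, the symmetric loss fused by the kernels above is stable_kl(input, target.detach()) + stable_kl(target, input.detach()); up to the epsilon terms, rp = -log(1/(p + eps) - 1 + eps) reduces to log(p / (1 - p)), the logit of p. A minimal usage sketch, assuming the stable_kl definition and imports from the block above are in scope (tensor shapes are illustrative, not taken from the repo):
import torch
logits_a = torch.randn(4, 4, 4, 4)
logits_b = logits_a.clone()
# rp == ry when both softmax distributions coincide, so the symmetric loss
# for identical logits is exactly zero.
loss = stable_kl(logits_a, logits_b.detach()) + stable_kl(logits_b, logits_a.detach())
print(loss.item())  # 0.0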
BERTLhuc | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qb/cqby64dgyy5z4f3672ayjxwmzm77gdp3xkyq6vbunhcxkbluqral.py
# Topologically Sorted Source Nodes: [mul, sigmoid, hidden_states], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# hidden_states => mul_1
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 2.0), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
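# in_ptr1 is the (4,)-element lhuc parameter; x0 = xindex % 4 indexes the
# hidden dimension, broadcasting each element across the 64 leading positions.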
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid, hidden_states], Original ATen: [aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class BERTLhuc(nn.Module):
def __init__(self, config):
super(BERTLhuc, self).__init__()
self.lhuc = Parameter(torch.zeros(config.hidden_size))
def forward(self, hidden_states):
hidden_states = hidden_states * 2.0 * nn.functional.sigmoid(self.lhuc)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](primals_1, primals_2,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class BERTLhucNew(nn.Module):
def __init__(self, config):
super(BERTLhucNew, self).__init__()
self.lhuc = Parameter(torch.zeros(config.hidden_size))
def forward(self, input_0):
primals_2 = self.lhuc
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| DAQuestionAnswering/Bert-n-Pals | BERTLhuc | false | 7,035 | ["MIT"] | 1 | d5a288b9ac62259e70c249635108ba3906e19f00 | https://github.com/DAQuestionAnswering/Bert-n-Pals/tree/d5a288b9ac62259e70c249635108ba3906e19f00 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.lhuc = Parameter(torch.zeros(config.hidden_size))
def forward(self, hidden_states):
hidden_states = hidden_states * 2.0 * nn.functional.sigmoid(self.lhuc)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
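The LHUC gate above scales each hidden unit by 2 * sigmoid(r), so a zero-initialized parameter starts as an identity map (2 * sigmoid(0) = 1). A minimal eager-mode sketch of that property, with illustrative shapes rather than the repo's real configuration:
import torch
hidden = torch.randn(4, 4, 4, 4)
lhuc = torch.zeros(4)  # hidden_size == 4, as in the mock config above
out = hidden * 2.0 * torch.sigmoid(lhuc)
assert torch.allclose(out, hidden)  # 2 * sigmoid(0) == 1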
Cosine | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/65/c65fzntschmihaixi34nxrnc3ebvynor7gvbhgvxkbc5w2qsknbs.py
# Topologically Sorted Source Nodes: [norm, norm_1, mul, add], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# norm => pow_1, pow_2, sum_1
# norm_1 => pow_3, pow_4, sum_2
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %pow_4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-09), kwargs = {})
triton_poi_fused_add_linalg_vector_norm_mul_1 = async_compile.triton('triton_poi_fused_add_linalg_vector_norm_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_linalg_vector_norm_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = libdevice.sqrt(tmp22)
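# tmp11 and tmp23 are the L2 norms of the matching length-4 rows of src and
# tgt; the stored value is the denominator ||src|| * ||tgt|| + 1e-09 consumed
# by the div kernel below.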
tmp24 = tmp11 * tmp23
tmp25 = 1e-09
tmp26 = tmp24 + tmp25
tl.store(out_ptr0 + (x0), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bh/cbhnrrn4ctzyyoqx65k45oybf5e5pftynoyjwxgz6u3k34xuioxs.py
# Topologically Sorted Source Nodes: [norm, norm_1, mul, add, truediv, squeeze], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add, aten.div, aten.squeeze]
# Source node to ATen node mapping:
# add => add
# mul => mul
# norm => pow_1, pow_2, sum_1
# norm_1 => pow_3, pow_4, sum_2
# squeeze => squeeze
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %pow_4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-09), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %add), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%div,), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [norm, norm_1, mul, add], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add]
triton_poi_fused_add_linalg_vector_norm_mul_1.run(arg0_1, arg1_1, buf2, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [norm, norm_1, mul, add, truediv, squeeze], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add, aten.div, aten.squeeze]
triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2.run(buf3, buf2, 256, grid=grid(256), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch.optim.lr_scheduler import *
class Cosine(torch.nn.Module):
def __init__(self, config):
super().__init__()
def forward(self, src, tgt):
src = src.float()
tgt = tgt.float()
return (torch.matmul(src, tgt.transpose(2, 1)) / (src.norm(p=2, dim=-1, keepdim=True) * tgt.norm(p=2, dim=-1, keepdim=True) + 1e-09)).squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config()}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_linalg_vector_norm_mul_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tmp11 * tmp23
tmp25 = 1e-09
tmp26 = tmp24 + tmp25
tl.store(out_ptr0 + x0, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2(in_out_ptr0,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out
=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_linalg_vector_norm_mul_1[grid(64)](arg0_1,
arg1_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2[grid(256)](
buf3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
return buf3,
class CosineNew(torch.nn.Module):
def __init__(self, config):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kiminh/mt-dnn | Cosine | false | 7,036 | ["MIT"] | 1 | 133884b380244dbe74acc4d7507e551b2c5035b3 | https://github.com/kiminh/mt-dnn/tree/133884b380244dbe74acc4d7507e551b2c5035b3 | from _paritybench_helpers import _mock_config
import torch
from torch.optim.lr_scheduler import *
class Model(torch.nn.Module):
def __init__(self, config):
super().__init__()
def forward(self, src, tgt):
src = src.float()
tgt = tgt.float()
return (torch.matmul(src, tgt.transpose(2, 1)) / (src.norm(p=2, dim=-1, keepdim=True) * tgt.norm(p=2, dim=-1, keepdim=True) + 1e-09)).squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
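Note that the Cosine row's denominator indexes both norms by the row position, so only the diagonal entries are exact cosines. A small eager check of that diagonal against F.cosine_similarity, using 3-D inputs where transpose(2, 1) is the usual batched transpose (shapes are illustrative, not from the repo):
import torch
import torch.nn.functional as F
src = torch.rand(2, 5, 8)
tgt = torch.rand(2, 5, 8)
den = src.norm(p=2, dim=-1, keepdim=True) * tgt.norm(p=2, dim=-1, keepdim=True) + 1e-09
sim = torch.matmul(src, tgt.transpose(2, 1)) / den
diag = sim.diagonal(dim1=1, dim2=2)
assert torch.allclose(diag, F.cosine_similarity(src, tgt, dim=-1), atol=1e-05)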
ConvLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5v/c5v7wabpbwevjm6yvut3g2fo5ffi7es7i6f733j6xjrzrnhfheet.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%permute, [3, 3], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 10
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
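# shift the padded index back by pad == (7 - 1) // 2 == 3; x2 in [0, 3) and
# [7, 10) falls outside the source row and reads 0.0 via other=0.0.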
tmp0 = (-3) + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-12) + y0 + (4*x2) + (16*y1)), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x2 + (10*y3)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ee/ceel7gqb6bxxi6v5akykl67eptfcm6duyq2mtmqrub2kloaw7htp.py
# Topologically Sorted Source Nodes: [conv1d, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv1d => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 7), (28, 7, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 10), (40, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 16, 10, grid=grid(16, 10), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv1d, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_3, buf3, 64, grid=grid(64), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 7), (28, 7, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ConvLayer(nn.Module):
"""1-D Convolution layer to extract high-level features of each time-series input
:param n_features: Number of input features/nodes
:param window_size: length of the input sequence
:param kernel_size: size of kernel to use in the convolution operation
"""
def __init__(self, n_features, kernel_size=7):
super(ConvLayer, self).__init__()
self.padding = nn.ConstantPad1d((kernel_size - 1) // 2, 0.0)
self.conv = nn.Conv1d(in_channels=n_features, out_channels=
n_features, kernel_size=kernel_size)
self.relu = nn.ReLU()
def forward(self, x):
x = x.permute(0, 2, 1)
x = self.padding(x)
x = self.relu(self.conv(x))
return x.permute(0, 2, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 10
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = -3 + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-12 + y0 + 4 * x2 + 16 * y1), tmp5 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x2 + 10 * y3), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 7), (28, 7, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 10), (40, 10, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(16, 10)](primals_1, buf0,
16, 10, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(64)](buf2,
primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), primals_2, buf0, buf3
class ConvLayerNew(nn.Module):
"""1-D Convolution layer to extract high-level features of each time-series input
:param n_features: Number of input features/nodes
:param window_size: length of the input sequence
:param kernel_size: size of kernel to use in the convolution operation
"""
def __init__(self, n_features, kernel_size=7):
super(ConvLayerNew, self).__init__()
self.padding = nn.ConstantPad1d((kernel_size - 1) // 2, 0.0)
self.conv = nn.Conv1d(in_channels=n_features, out_channels=
n_features, kernel_size=kernel_size)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| kj21choi/LATAD | ConvLayer | false | 7,037 | ["MIT"] | 1 | 80d91e0f251ad0225342ee30e2461a39fa9cca97 | https://github.com/kj21choi/LATAD/tree/80d91e0f251ad0225342ee30e2461a39fa9cca97 | import torch
from torch import nn
class Model(nn.Module):
"""1-D Convolution layer to extract high-level features of each time-series input
:param n_features: Number of input features/nodes
:param window_size: length of the input sequence
:param kernel_size: size of kernel to use in the convolution operation
"""
def __init__(self, n_features, kernel_size=7):
super().__init__()
self.padding = nn.ConstantPad1d((kernel_size - 1) // 2, 0.0)
self.conv = nn.Conv1d(in_channels=n_features, out_channels=
n_features, kernel_size=kernel_size)
self.relu = nn.ReLU()
def forward(self, x):
x = x.permute(0, 2, 1)
x = self.padding(x)
x = self.relu(self.conv(x))
return x.permute(0, 2, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
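The padding arithmetic in the ConvLayer rows keeps the sequence length fixed: ConstantPad1d((kernel_size - 1) // 2, 0.0) pads 3 zeros on each side for kernel_size = 7, so a length-4 sequence grows to 4 + 6 = 10 and the valid Conv1d returns 10 - 7 + 1 = 4. A quick shape check with the same illustrative sizes as get_inputs above:
import torch
from torch import nn
pad = nn.ConstantPad1d((7 - 1) // 2, 0.0)  # 3 zeros on each side
conv = nn.Conv1d(in_channels=4, out_channels=4, kernel_size=7)
x = torch.rand(4, 4, 4)                    # (batch, seq, features)
y = conv(pad(x.permute(0, 2, 1)))          # 4 + 6 = 10 -> 10 - 7 + 1 = 4
assert y.shape == (4, 4, 4)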
SymKlCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wv/cwvti54lsojjpkh6f73xsvv55wjtno2rrmtjgaznlsjst37yn74a.py
# Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax_1 => amax_2, sub_4
# softmax => amax_1, exp_1, sub_2
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
# %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_2), kwargs = {})
triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7m/c7m62lqyfgv73n2cb7qnzr2xodmurvv7yy6z2m7nhu5m3pfdjjxe.py
# Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# softmax_1 => amax_3, exp_3, sub_6
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_6,), kwargs = {})
triton_poi_fused__log_softmax__softmax_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7u/c7udesn7p5qs3jrwgyf3hzg46tjjmy4azf3ycyya63euoc5gcym5.py
# Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div, aten.add]
# Source node to ATen node mapping:
# kl_div => div_1, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1
# kl_div_1 => div_3, eq_1, full_default_2, full_default_3, isnan_1, log_3, mul_2, mul_3, sub_7, sum_6, where_2, where_3
# log_softmax => exp, log, sub_1, sum_1
# log_softmax_1 => exp_2, log_2, sub_5, sum_4
# loss => add
# loss_1 => mul_4
# softmax => div, sum_2
# softmax_1 => div_2, sum_5
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [-1], True), kwargs = {})
# %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_5), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_3), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_3), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [-1], True), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %log_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_2), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_7,), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %div_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {})
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2 = async_compile.triton('triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
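    # Running sums for the two KL terms of the symmetric loss (one per direction).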
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp18 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp37 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp52 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp60 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float("nan")
tmp16 = tl.where(tmp9, tmp15, tmp14)
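        # xlogy(p, p): zero where p == 0, with NaN inputs propagated (matches aten.xlogy).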
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = _tmp34 + tmp33
_tmp34 = tl.where(rmask, tmp35, _tmp34)
tmp39 = tmp37 + tmp38
tmp41 = tmp39 + tmp40
tmp43 = tmp41 + tmp42
tmp44 = tmp36 / tmp43
tmp45 = libdevice.isnan(tmp44).to(tl.int1)
tmp46 = tmp44 == tmp10
tmp47 = tl_math.log(tmp44)
tmp48 = tmp44 * tmp47
tmp49 = tl.where(tmp46, tmp10, tmp48)
tmp50 = tl.where(tmp45, tmp15, tmp49)
tmp53 = tl_math.exp(tmp52)
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp63 = tl_math.log(tmp62)
tmp64 = tmp51 - tmp63
tmp65 = tmp44 * tmp64
tmp66 = tmp50 - tmp65
tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
tmp69 = _tmp68 + tmp67
_tmp68 = tl.where(rmask, tmp69, _tmp68)
tmp34 = tl.sum(_tmp34, 1)[:, None]
tmp68 = tl.sum(_tmp68, 1)[:, None]
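    # batchmean reduction: each summed KL term is scaled by 1/4 (the leading batch dim); the trailing * 1.0 is alpha.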
tmp70 = 0.25
tmp71 = tmp34 * tmp70
tmp72 = tmp68 * tmp70
tmp73 = tmp71 + tmp72
tmp74 = 1.0
tmp75 = tmp73 * tmp74
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp75, None)
''', device_str='cuda')
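# A minimal eager-mode reference for the fused reduction above, mirroring the
# SymKlCriterion forward shown later in this record; `ref_sym_kl` is a
# hypothetical helper for numerical cross-checking, not part of the AOT output.
def ref_sym_kl(inp, tgt, alpha=1.0):
    import torch.nn.functional as F
    loss = F.kl_div(F.log_softmax(inp, dim=-1), F.softmax(tgt, dim=-1),
                    reduction='batchmean')
    loss = loss + F.kl_div(F.log_softmax(tgt, dim=-1), F.softmax(inp, dim=-1),
                           reduction='batchmean')
    return loss * alpha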
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0.run(arg1_1, buf0, buf6, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax]
triton_poi_fused__log_softmax__softmax_1.run(arg0_1, buf2, buf4, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div, aten.add]
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2.run(buf8, buf0, buf2, buf4, buf6, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf2
del buf4
del buf6
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class SymKlCriterion(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits
"""
input = input.float()
target = target.float()
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
F.softmax(target.detach(), dim=-1, dtype=torch.float32),
reduction='batchmean') + F.kl_div(F.log_softmax(target, dim=-1,
dtype=torch.float32), F.softmax(input.detach(), dim=-1, dtype=
torch.float32), reduction='batchmean')
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
@triton.jit
def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
    # Grid is (1,), so the x-dimension bookkeeping collapses; only the r-loop below does work.
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
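        # r2 walks all 256 elements; r1 picks the row of 4 that shares one softmax normalizer.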
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp37 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp40 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp42 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp52 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp60 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = _tmp34 + tmp33
_tmp34 = tl.where(rmask, tmp35, _tmp34)
tmp39 = tmp37 + tmp38
tmp41 = tmp39 + tmp40
tmp43 = tmp41 + tmp42
tmp44 = tmp36 / tmp43
tmp45 = libdevice.isnan(tmp44).to(tl.int1)
tmp46 = tmp44 == tmp10
tmp47 = tl_math.log(tmp44)
tmp48 = tmp44 * tmp47
tmp49 = tl.where(tmp46, tmp10, tmp48)
tmp50 = tl.where(tmp45, tmp15, tmp49)
tmp53 = tl_math.exp(tmp52)
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp63 = tl_math.log(tmp62)
tmp64 = tmp51 - tmp63
tmp65 = tmp44 * tmp64
tmp66 = tmp50 - tmp65
tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
tmp69 = _tmp68 + tmp67
_tmp68 = tl.where(rmask, tmp69, _tmp68)
tmp34 = tl.sum(_tmp34, 1)[:, None]
tmp68 = tl.sum(_tmp68, 1)[:, None]
tmp70 = 0.25
tmp71 = tmp34 * tmp70
tmp72 = tmp68 * tmp70
tmp73 = tmp71 + tmp72
tmp74 = 1.0
tmp75 = tmp73 * tmp74
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp75, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0,
buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_1[grid(256)](arg0_1, buf2,
buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf8 = buf3
del buf3
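        # buf3's storage is reused as buf8, the scalar in/out accumulator of the fused kernel.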
triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2[grid
(1)](buf8, buf0, buf2, buf4, buf6, 1, 256, XBLOCK=1, RBLOCK=256,
num_warps=8, num_stages=1)
del buf0
del buf2
del buf4
del buf6
return buf8,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class SymKlCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
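# Hedged usage sketch (requires a CUDA device; shapes follow the
# assert_size_stride guards in call above):
#   crit = SymKlCriterionNew()
#   loss = crit(torch.rand(4, 4, 4, 4, device='cuda'),
#               torch.rand(4, 4, 4, 4, device='cuda'))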
| kiminh/mt-dnn | SymKlCriterion | false | 7,038 | ["MIT"] | 1 | 133884b380244dbe74acc4d7507e551b2c5035b3 | https://github.com/kiminh/mt-dnn/tree/133884b380244dbe74acc4d7507e551b2c5035b3 | import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class Model(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits
"""
input = input.float()
target = target.float()
loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
F.softmax(target.detach(), dim=-1, dtype=torch.float32),
reduction='batchmean') + F.kl_div(F.log_softmax(target, dim=-1,
dtype=torch.float32), F.softmax(input.detach(), dim=-1, dtype=
torch.float32), reduction='batchmean')
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
NN1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yc/cycrh7pnpyj5s6mtdlfj5xpr2zq4rm3ufjw2yyxl3376x4yurnca.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
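    # tmp4 = ReLU(gemm_out + bias); tmp6 records where the activation clamped to zero, for the backward threshold pass.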
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/37/c3764ilm43eabzzgx7el4zu5rmxb55hhnnlyfjqufw4iufzt5t7r.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 25
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xr/cxrxf4nkydknjv7xhdecpyrprhviagsqwicrk4lpp64qv2hkzaxp.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (60, 4), (4, 1))
assert_size_stride(primals_2, (60, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (25, 60), (60, 1))
assert_size_stride(primals_5, (25, ), (1, ))
assert_size_stride(primals_6, (1, 25), (25, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 60), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 60), (960, 240, 60, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 3840, grid=grid(3840), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 25), (25, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 60), (60, 1), 0), reinterpret_tensor(primals_4, (60, 25), (1, 60), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 25), (400, 100, 25, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 25), (400, 100, 25, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 1600, grid=grid(1600), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 25), (25, 1), 0), reinterpret_tensor(primals_6, (25, 1), (1, 25), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf5, primals_7, 64, grid=grid(64), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 60), (60, 1), 0), reinterpret_tensor(buf3, (64, 25), (25, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((60, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((60, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((25, 60), (60, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 25), (25, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class NN1(nn.Module):
def __init__(self, input_dimension):
super(NN1, self).__init__()
self.linear1 = nn.Linear(input_dimension, 60)
self.linear2 = nn.Linear(60, 25)
self.linear3 = nn.Linear(25, 1)
self.sig = nn.Sigmoid()
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.2)
def forward(self, x):
x = self.relu(self.linear1(x))
x = self.dropout(x)
x = self.relu(self.linear2(x))
x = self.dropout(x)
x = self.linear3(x)
return self.sig(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4}]
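# Hedged usage sketch (eager mode, hypothetical snippet):
#   net = NN1(input_dimension=4)
#   y = net(torch.rand(4, 4, 4, 4))  # sigmoid outputs of shape (4, 4, 4, 1)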
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 25
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (60, 4), (4, 1))
assert_size_stride(primals_2, (60,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (25, 60), (60, 1))
assert_size_stride(primals_5, (25,), (1,))
assert_size_stride(primals_6, (1, 25), (25, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
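        # First GEMM: the (4, 4, 4, 4) input is viewed as (64, 4) and multiplied by linear1's transposed weight.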
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 60), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 60), (960, 240, 60, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(3840)](buf1,
primals_2, buf7, 3840, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 25), (25, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 60), (60, 1), 0),
reinterpret_tensor(primals_4, (60, 25), (1, 60), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 25), (400, 100, 25, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 25), (400, 100, 25, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(1600)](buf3,
primals_5, buf6, 1600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 25), (25, 1), 0),
reinterpret_tensor(primals_6, (25, 1), (1, 25), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_sigmoid_2[grid(64)](buf5, primals_7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 60), (60, 1), 0), reinterpret_tensor(
buf3, (64, 25), (25, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class NN1New(nn.Module):
def __init__(self, input_dimension):
super(NN1New, self).__init__()
self.linear1 = nn.Linear(input_dimension, 60)
self.linear2 = nn.Linear(60, 25)
self.linear3 = nn.Linear(25, 1)
self.sig = nn.Sigmoid()
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.2)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kirtanp/MAMO-fair | NN1 | false | 7,039 | ["Apache-2.0"] | 1 | fd0fc39383f11a9e1ec401233b89c2399860fb94 | https://github.com/kirtanp/MAMO-fair/tree/fd0fc39383f11a9e1ec401233b89c2399860fb94 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dimension):
super().__init__()
self.linear1 = nn.Linear(input_dimension, 60)
self.linear2 = nn.Linear(60, 25)
self.linear3 = nn.Linear(25, 1)
self.sig = nn.Sigmoid()
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.2)
def forward(self, x):
x = self.relu(self.linear1(x))
x = self.dropout(x)
x = self.relu(self.linear2(x))
x = self.dropout(x)
x = self.linear3(x)
return self.sig(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Transformation | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6t/c6tpumag7qsqvih3vubnm4biztoygq4dhm6nv2n5sekvgkolaovm.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# h_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.3), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.3
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
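    # LeakyReLU(0.3): keep positives, scale negatives by 0.3; tmp4 is the sign mask saved for backward.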
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7u/c7upefbvn6vjogzxdjbutwncsxrgeel7zak34by4r56h5wknmu6g.py
# Topologically Sorted Source Nodes: [m, t_x], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# m => sigmoid
# t_x => mul_3
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_7,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_1), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
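    # t_x = sigmoid(h) * x: the learned mask gates the raw input elementwise.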
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 256, grid=grid(256), stream=stream0)
del primals_3
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_0.run(buf6, primals_7, buf7, buf8, 256, grid=grid(256), stream=stream0)
del primals_7
buf9 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [h_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_9
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [m, t_x], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(buf9, primals_1, buf10, 256, grid=grid(256), stream=stream0)
return (buf10, primals_1, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), buf9, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Transformation(torch.nn.Module):
def __init__(self, input_size):
super(Transformation, self).__init__()
self.input_size = input_size
self.linear1 = torch.nn.Linear(self.input_size, self.input_size)
self.linear2 = torch.nn.Linear(self.input_size, self.input_size)
self.linear3 = torch.nn.Linear(self.input_size, self.input_size)
self.linear4 = torch.nn.Linear(self.input_size, self.input_size)
self.leaky_relu = nn.LeakyReLU(0.3)
def forward(self, x):
"""
Transforms input x with a mask M(x) followed by multiplication with x.
"""
h = self.linear1(x.float())
h = self.leaky_relu(h)
h = self.linear2(h)
h = self.leaky_relu(h)
h = self.linear3(h)
h = self.leaky_relu(h)
h = self.linear4(h)
m = torch.sigmoid(h)
t_x = m * x.float()
return t_x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
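# Hedged usage sketch (eager mode): the sigmoid mask preserves the input's shape.
#   t = Transformation(input_size=4)
#   y = t(torch.rand(4, 4, 4, 4))  # y.shape == (4, 4, 4, 4)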
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.3
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_3, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf3, primals_5, buf4,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf6, primals_7, buf7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf9 = buf6
del buf6
extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_9
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf9, primals_1, buf10,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf10, primals_1, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (64, 4), (4, 1), 0
), buf9, primals_8, primals_6, primals_4
class TransformationNew(torch.nn.Module):
def __init__(self, input_size):
super(TransformationNew, self).__init__()
self.input_size = input_size
self.linear1 = torch.nn.Linear(self.input_size, self.input_size)
self.linear2 = torch.nn.Linear(self.input_size, self.input_size)
self.linear3 = torch.nn.Linear(self.input_size, self.input_size)
self.linear4 = torch.nn.Linear(self.input_size, self.input_size)
self.leaky_relu = nn.LeakyReLU(0.3)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.linear4.weight
primals_9 = self.linear4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| kj21choi/LATAD | Transformation | false | 7,040 | ["MIT"] | 1 | 80d91e0f251ad0225342ee30e2461a39fa9cca97 | https://github.com/kj21choi/LATAD/tree/80d91e0f251ad0225342ee30e2461a39fa9cca97 | import torch
from torch import nn
class Model(torch.nn.Module):
def __init__(self, input_size):
super().__init__()
self.input_size = input_size
self.linear1 = torch.nn.Linear(self.input_size, self.input_size)
self.linear2 = torch.nn.Linear(self.input_size, self.input_size)
self.linear3 = torch.nn.Linear(self.input_size, self.input_size)
self.linear4 = torch.nn.Linear(self.input_size, self.input_size)
self.leaky_relu = nn.LeakyReLU(0.3)
def forward(self, x):
"""
Transforms input x with a mask M(x) followed by multiplication with x.
"""
h = self.linear1(x.float())
h = self.leaky_relu(h)
h = self.linear2(h)
h = self.leaky_relu(h)
h = self.linear3(h)
h = self.leaky_relu(h)
h = self.linear4(h)
m = torch.sigmoid(h)
t_x = m * x.float()
return t_x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
TemporalAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ex/cex5vew232oddczegzsyu4nfhctnoekjuswo4kovc32y6njhkabm.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %repeat], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*(x1 // 4)) + (16*x2) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*x2) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
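# Eager reference for the fused cat kernel above (a sketch; x is the
# (4, 4, 4) input and K = window_size = 4). In the kernel, x0 picks the
# left/right half of each concatenated pair, x1 enumerates the K*K (i, j)
# pairs, and x2 indexes the batch element.
def _attention_input_reference(x, K=4):
    return torch.cat(
        (x.repeat_interleave(K, dim=1), x.repeat(1, K, 1)), dim=2)  # (4, 16, 8)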
# kernel path: runs/run_shard_4/inductor_cache/ll/cll7qwji5njf4ritvwichxub2dybleyohlvyi4bpb7yk6zr6s5tn.py
# Topologically Sorted Source Nodes: [a_input_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# a_input_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_3, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 4.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
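# Eager reference for the fused bias + LeakyReLU kernel above (a sketch).
# negative_slope = 4.0 comes from alpha = 4 in get_init_inputs; the boolean
# mask is what the kernel stores in out_ptr0 for the backward pass.
def _leaky_relu_reference(lin_out, bias, negative_slope=4.0):
    y = lin_out + bias
    mask = y > 0
    return mask, torch.where(mask, y, negative_slope * y)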
# kernel path: runs/run_shard_4/inductor_cache/nq/cnqswidfunn76tq4f6odz2aeqivwtvetltfrzs7t24fmwnk35ffp.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x1) + (16*((1 + (4*x0)) // 16))), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x1) + (16*((1 + (2*x0)) // 8))), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x1) + (16*((3 + (4*x0)) // 16))), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xq/cxq2eoi5o5mt4fpccrvrqb77naakl7jklckwsypmcbq6c3gcg7bs.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, div, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex % 16
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x4), tmp7, xmask)
''', device_str='cuda')
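# Eager reference for the two softmax kernels above (a sketch): a numerically
# stable softmax over dim=2, computed in two passes exactly as the kernels do
# (pass 1: row max and row sum of exponentials; pass 2: normalize). Here e is
# the bias-added score tensor (in_ptr0 + in_ptr1).
def _stable_softmax_reference(e):
    m = e.max(dim=2, keepdim=True).values  # buf5
    p = (e - m).exp()                      # subtracting the max avoids overflow
    return p / p.sum(dim=2, keepdim=True)  # buf6 holds the denominator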
# kernel path: runs/run_shard_4/inductor_cache/3a/c3ae4zn4gadvsfvudsnoa5ecfuerwjqrfo6xzopxw7eykwwjjx7y.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# h => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%bmm,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 8), (8, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (8, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 512, grid=grid(512), stream=stream0)
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [a_input_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf1, primals_3, buf2, buf3, 512, grid=grid(512), stream=stream0)
del buf1
del primals_3
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), primals_4, out=buf4)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, primals_5, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, primals_5, buf5, buf6, buf7, 64, grid=grid(64), stream=stream0)
del buf5
del buf6
del primals_5
buf8 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, primals_1, out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf9, 64, grid=grid(64), stream=stream0)
return (buf9, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2, buf7, buf9, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (8, 64), (1, 8), 0), reinterpret_tensor(primals_4, (1, 8), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class TemporalAttentionLayer(nn.Module):
"""Single Graph Temporal Attention Layer
:param n_features: number of input features/nodes
:param window_size: length of the input sequence
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope used in the leaky relu activation function
:param embed_dim: embedding dimension (output dimension of linear transformation)
:param use_gatv2: whether to use the modified attention mechanism of GATv2 instead of standard GAT
:param use_bias: whether to include a bias term in the attention layer
"""
def __init__(self, n_features, window_size, dropout, alpha, embed_dim=
None, use_gatv2=True, use_bias=True):
super(TemporalAttentionLayer, self).__init__()
self.n_features = n_features
self.window_size = window_size
self.dropout = dropout
self.use_gatv2 = use_gatv2
self.embed_dim = embed_dim if embed_dim is not None else n_features
self.num_nodes = window_size
self.use_bias = use_bias
if self.use_gatv2:
self.embed_dim *= 2
lin_input_dim = 2 * n_features
a_input_dim = self.embed_dim
else:
lin_input_dim = n_features
a_input_dim = 2 * self.embed_dim
self.lin = nn.Linear(lin_input_dim, self.embed_dim)
self.a = nn.Parameter(torch.empty((a_input_dim, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
if self.use_bias:
self.bias = nn.Parameter(torch.empty(window_size, window_size))
self.leakyrelu = nn.LeakyReLU(alpha)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
if self.use_gatv2:
a_input = self._make_attention_input(x)
a_input = self.leakyrelu(self.lin(a_input))
e = torch.matmul(a_input, self.a).squeeze(3)
else:
Wx = self.lin(x)
a_input = self._make_attention_input(Wx)
e = self.leakyrelu(torch.matmul(a_input, self.a)).squeeze(3)
if self.use_bias:
e += self.bias
attention = torch.softmax(e, dim=2)
attention = torch.dropout(attention, self.dropout, train=self.training)
h = self.sigmoid(torch.matmul(attention, x))
return h
def _make_attention_input(self, v):
"""Preparing the temporal attention mechanism.
Creating matrix with all possible combinations of concatenations of node values:
(v1, v2..)_t1 || (v1, v2..)_t1
(v1, v2..)_t1 || (v1, v2..)_t2
...
...
(v1, v2..)_tn || (v1, v2..)_t1
(v1, v2..)_tn || (v1, v2..)_t2
"""
K = self.num_nodes
blocks_repeating = v.repeat_interleave(K, dim=1)
blocks_alternating = v.repeat(1, K, 1)
combined = torch.cat((blocks_repeating, blocks_alternating), dim=2)
if self.use_gatv2:
return combined.view(v.size(0), K, K, 2 * self.n_features)
else:
return combined.view(v.size(0), K, K, 2 * self.embed_dim)
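# Shape walk-through for _make_attention_input with the defaults below
# (b = 4, K = window_size = 4, n_features = 4):
#   v                          (4, 4, 4)    input (b, K, n_features)
#   v.repeat_interleave(K, 1)  (4, 16, 4)   each time step repeated K times
#   v.repeat(1, K, 1)          (4, 16, 4)   the whole sequence tiled K times
#   torch.cat(..., dim=2)      (4, 16, 8)
#   .view(b, K, K, 8)          (4, 4, 4, 8) all (i, j) pairs [v_ti || v_tj]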
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'window_size': 4, 'dropout': 0.5, 'alpha': 4}
]
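def example_usage():
    # A minimal usage sketch (assumes CPU execution with the shapes from the
    # helpers above): build the layer from get_init_inputs(), run one forward
    # pass on get_inputs(); the gated attention output keeps the input's
    # (batch, window, features) shape.
    init_args, init_kwargs = get_init_inputs()
    layer = TemporalAttentionLayer(*init_args, **init_kwargs)
    x, = get_inputs()
    h = layer(x)
    assert h.shape == x.shape  # (4, 4, 4)
    return h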
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * (x1 // 4) + 16 * x2 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 &
xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 4.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1 + 16 * ((1 + 4 * x0) //
16)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x1 + 16 * ((1 + 2 * x0) //
8)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1 + 16 * ((3 + 4 * x0) //
16)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex % 16
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + x4, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 8), (8, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (8, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_3, buf2,
buf3, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
primals_4, out=buf4)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf4, primals_5, buf5, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf4, primals_5, buf5, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_5
buf8 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf7, primals_1, out=buf8)
buf9 = buf8
del buf8
        triton_poi_fused_sigmoid_4[grid(64)](buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
return buf9, reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf2, buf7, buf9, reinterpret_tensor(primals_1, (4, 4, 4), (16,
1, 4), 0), reinterpret_tensor(buf3, (8, 64), (1, 8), 0
), reinterpret_tensor(primals_4, (1, 8), (1, 1), 0)
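# call() returns the sigmoid-gated output first, followed by tensors saved for
# the backward pass (the concatenated attention input, the LeakyReLU mask, the
# softmax weights, and reinterpreted views of the primals);
# TemporalAttentionLayerNew.forward below surfaces only output[0].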
class TemporalAttentionLayerNew(nn.Module):
"""Single Graph Temporal Attention Layer
:param n_features: number of input features/nodes
:param window_size: length of the input sequence
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope used in the leaky relu activation function
:param embed_dim: embedding dimension (output dimension of linear transformation)
:param use_gatv2: whether to use the modified attention mechanism of GATv2 instead of standard GAT
:param use_bias: whether to include a bias term in the attention layer
"""
def __init__(self, n_features, window_size, dropout, alpha, embed_dim=
None, use_gatv2=True, use_bias=True):
super(TemporalAttentionLayerNew, self).__init__()
self.n_features = n_features
self.window_size = window_size
self.dropout = dropout
self.use_gatv2 = use_gatv2
self.embed_dim = embed_dim if embed_dim is not None else n_features
self.num_nodes = window_size
self.use_bias = use_bias
if self.use_gatv2:
self.embed_dim *= 2
lin_input_dim = 2 * n_features
a_input_dim = self.embed_dim
else:
lin_input_dim = n_features
a_input_dim = 2 * self.embed_dim
self.lin = nn.Linear(lin_input_dim, self.embed_dim)
self.a = nn.Parameter(torch.empty((a_input_dim, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
if self.use_bias:
self.bias = nn.Parameter(torch.empty(window_size, window_size))
self.leakyrelu = nn.LeakyReLU(alpha)
self.sigmoid = nn.Sigmoid()
def _make_attention_input(self, v):
"""Preparing the temporal attention mechanism.
Creating matrix with all possible combinations of concatenations of node values:
(v1, v2..)_t1 || (v1, v2..)_t1
(v1, v2..)_t1 || (v1, v2..)_t2
...
...
(v1, v2..)_tn || (v1, v2..)_t1
(v1, v2..)_tn || (v1, v2..)_t2
"""
K = self.num_nodes
blocks_repeating = v.repeat_interleave(K, dim=1)
blocks_alternating = v.repeat(1, K, 1)
combined = torch.cat((blocks_repeating, blocks_alternating), dim=2)
if self.use_gatv2:
return combined.view(v.size(0), K, K, 2 * self.n_features)
else:
return combined.view(v.size(0), K, K, 2 * self.embed_dim)
def forward(self, input_0):
primals_4 = self.a
primals_5 = self.bias
primals_2 = self.lin.weight
primals_3 = self.lin.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| kj21choi/LATAD | TemporalAttentionLayer | false | 7,041 | [
"MIT"
] | 1 | 80d91e0f251ad0225342ee30e2461a39fa9cca97 | https://github.com/kj21choi/LATAD/tree/80d91e0f251ad0225342ee30e2461a39fa9cca97 | import torch
from torch import nn
class Model(nn.Module):
"""Single Graph Temporal Attention Layer
:param n_features: number of input features/nodes
:param window_size: length of the input sequence
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope used in the leaky relu activation function
:param embed_dim: embedding dimension (output dimension of linear transformation)
:param use_gatv2: whether to use the modified attention mechanism of GATv2 instead of standard GAT
:param use_bias: whether to include a bias term in the attention layer
"""
def __init__(self, n_features, window_size, dropout, alpha, embed_dim=
None, use_gatv2=True, use_bias=True):
super().__init__()
self.n_features = n_features
self.window_size = window_size
self.dropout = dropout
self.use_gatv2 = use_gatv2
self.embed_dim = embed_dim if embed_dim is not None else n_features
self.num_nodes = window_size
self.use_bias = use_bias
if self.use_gatv2:
self.embed_dim *= 2
lin_input_dim = 2 * n_features
a_input_dim = self.embed_dim
else:
lin_input_dim = n_features
a_input_dim = 2 * self.embed_dim
self.lin = nn.Linear(lin_input_dim, self.embed_dim)
self.a = nn.Parameter(torch.empty((a_input_dim, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
if self.use_bias:
self.bias = nn.Parameter(torch.empty(window_size, window_size))
self.leakyrelu = nn.LeakyReLU(alpha)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
if self.use_gatv2:
a_input = self._make_attention_input(x)
a_input = self.leakyrelu(self.lin(a_input))
e = torch.matmul(a_input, self.a).squeeze(3)
else:
Wx = self.lin(x)
a_input = self._make_attention_input(Wx)
e = self.leakyrelu(torch.matmul(a_input, self.a)).squeeze(3)
if self.use_bias:
e += self.bias
attention = torch.softmax(e, dim=2)
attention = torch.dropout(attention, self.dropout, train=self.training)
h = self.sigmoid(torch.matmul(attention, x))
return h
def _make_attention_input(self, v):
"""Preparing the temporal attention mechanism.
Creating matrix with all possible combinations of concatenations of node values:
(v1, v2..)_t1 || (v1, v2..)_t1
(v1, v2..)_t1 || (v1, v2..)_t2
...
...
(v1, v2..)_tn || (v1, v2..)_t1
(v1, v2..)_tn || (v1, v2..)_t2
"""
K = self.num_nodes
blocks_repeating = v.repeat_interleave(K, dim=1)
blocks_alternating = v.repeat(1, K, 1)
combined = torch.cat((blocks_repeating, blocks_alternating), dim=2)
if self.use_gatv2:
return combined.view(v.size(0), K, K, 2 * self.n_features)
else:
return combined.view(v.size(0), K, K, 2 * self.embed_dim)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'window_size': 4, 'dropout': 0.5, 'alpha': 4}
]
|
UpConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tc/ctc7x3jpax35yx2pkxxfdyyfficonvfhhsbh4f7urq6xwaxnxl5l.py
# Topologically Sorted Source Nodes: [conv_transpose2d, elu], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# elu => expm1, gt, mul, mul_2, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
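# Eager reference for the fused bias + ELU kernel above (a sketch). ELU with
# alpha = 1.0 is where(y > 0, y, exp(y) - 1); expm1 computes exp(y) - 1 with
# better precision near zero, and the two multiplications by 1.0 in the kernel
# are the folded alpha/scale factors.
def _elu_reference(conv_out, bias):
    y = conv_out + bias.view(1, -1, 1, 1)  # broadcast bias over (N, C, H, W)
    return torch.where(y > 0, y, torch.expm1(y))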
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d, elu], Original ATen: [aten.convolution, aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class UpConv(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size):
super(UpConv, self).__init__()
self.deconv = nn.ConvTranspose2d(in_channels=input_nc, out_channels
=output_nc, kernel_size=2, bias=True, stride=2, padding=0)
self.activation_fn = nn.ELU()
def forward(self, input):
return self.activation_fn(self.deconv(input))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'output_nc': 4, 'kernel_size': 4}]
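def example_usage():
    # A minimal usage sketch: with stride 2, padding 0 and a 2x2 kernel, the
    # transposed convolution doubles each spatial dimension, so the
    # (4, 4, 4, 4) input maps to (4, 4, 8, 8); H_out = (H - 1) * 2 + 2 = 2 * H.
    # Note that kernel_size is accepted by __init__ but the deconv kernel is
    # hard-coded to 2.
    init_args, init_kwargs = get_init_inputs()
    up = UpConv(*init_args, **init_kwargs)
    x, = get_inputs()
    out = up(x)
    assert out.shape == (4, 4, 8, 8)
    return out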
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_elu_0[grid(1024)](buf1, primals_2,
buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, buf1
class UpConvNew(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size):
super(UpConvNew, self).__init__()
self.deconv = nn.ConvTranspose2d(in_channels=input_nc, out_channels
=output_nc, kernel_size=2, bias=True, stride=2, padding=0)
self.activation_fn = nn.ELU()
def forward(self, input_0):
primals_1 = self.deconv.weight
primals_2 = self.deconv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| kkrish39/realtime-depth-prediction-from-monocular-videos | UpConv | false | 7,042 | [
"BSD-3-Clause"
] | 1 | 9cde9c1a6df6c91af1ada80b3aaeebae03fc59dc | https://github.com/kkrish39/realtime-depth-prediction-from-monocular-videos/tree/9cde9c1a6df6c91af1ada80b3aaeebae03fc59dc | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size):
super().__init__()
self.deconv = nn.ConvTranspose2d(in_channels=input_nc, out_channels
=output_nc, kernel_size=2, bias=True, stride=2, padding=0)
self.activation_fn = nn.ELU()
def forward(self, input):
return self.activation_fn(self.deconv(input))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
FeatureAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/34/c34shfwg323xh3kp2lf57k62vrxzogp5n44jww2xcjz2fcm6btne.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %repeat], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x0) + (16*x2) + (x1 // 4)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*((-4) + x0)) + (16*x2) + (x1 % 4)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
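# Same concatenation pattern as the temporal layer above, but the load indices
# are transposed: 4*x0 + 16*x2 + (x1 // 4) walks the time axis with x0 and the
# feature axis with x1, i.e. the kernel materializes the cat over
# x.permute(0, 2, 1), so attention pairs are formed across features rather
# than across time steps.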
# kernel path: runs/run_shard_4/inductor_cache/ll/cll7qwji5njf4ritvwichxub2dybleyohlvyi4bpb7yk6zr6s5tn.py
# Topologically Sorted Source Nodes: [a_input_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# a_input_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_3, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 4.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nq/cnqswidfunn76tq4f6odz2aeqivwtvetltfrzs7t24fmwnk35ffp.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x1) + (16*((1 + (4*x0)) // 16))), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x1) + (16*((1 + (2*x0)) // 8))), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x1) + (16*((3 + (4*x0)) // 16))), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xq/cxq2eoi5o5mt4fpccrvrqb77naakl7jklckwsypmcbq6c3gcg7bs.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, div, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex % 16
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x4), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sy/csytnno22v7zx2kiphvd6sr4bgtct6i2tfbyyvxa3bhxgburptdj.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
# Source node to ATen node mapping:
# h => sigmoid
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%bmm,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_1), kwargs = {})
triton_poi_fused_sigmoid_sigmoid_backward_4 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp1 * tmp3
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
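# Eager reference for the fused kernel above (a sketch): the forward sigmoid
# plus h * (1 - h), the factor its backward pass needs, so autograd does not
# recompute the sigmoid in the backward graph.
def _sigmoid_with_grad_reference(z):
    h = torch.sigmoid(z)
    return h, h * (1.0 - h)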
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 8), (8, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (8, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 512, grid=grid(512), stream=stream0)
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [a_input_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf1, primals_3, buf2, buf3, 512, grid=grid(512), stream=stream0)
del buf1
del primals_3
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), primals_4, out=buf4)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, primals_5, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, primals_5, buf5, buf6, buf7, 64, grid=grid(64), stream=stream0)
del buf5
del buf6
del primals_5
buf8 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
triton_poi_fused_sigmoid_sigmoid_backward_4.run(buf9, buf10, 64, grid=grid(64), stream=stream0)
return (reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2, buf7, buf10, primals_1, reinterpret_tensor(buf3, (8, 64), (1, 8), 0), reinterpret_tensor(primals_4, (1, 8), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class FeatureAttentionLayer(nn.Module):
"""Single Graph Feature/Spatial Attention Layer
:param n_features: Number of input features/nodes
:param window_size: length of the input sequence
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope used in the leaky relu activation function
:param embed_dim: embedding dimension (output dimension of linear transformation)
:param use_gatv2: whether to use the modified attention mechanism of GATv2 instead of standard GAT
:param use_bias: whether to include a bias term in the attention layer
"""
def __init__(self, n_features, window_size, dropout, alpha, embed_dim=
None, use_gatv2=True, use_bias=True):
super(FeatureAttentionLayer, self).__init__()
self.n_features = n_features
self.window_size = window_size
self.dropout = dropout
self.embed_dim = embed_dim if embed_dim is not None else window_size
self.use_gatv2 = use_gatv2
self.num_nodes = n_features
self.use_bias = use_bias
if self.use_gatv2:
self.embed_dim *= 2
lin_input_dim = 2 * window_size
a_input_dim = self.embed_dim
else:
lin_input_dim = window_size
a_input_dim = 2 * self.embed_dim
self.lin = nn.Linear(lin_input_dim, self.embed_dim)
self.a = nn.Parameter(torch.empty((a_input_dim, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
if self.use_bias:
self.bias = nn.Parameter(torch.empty(n_features, n_features))
self.leakyrelu = nn.LeakyReLU(alpha)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = x.permute(0, 2, 1)
if self.use_gatv2:
a_input = self._make_attention_input(x)
a_input = self.leakyrelu(self.lin(a_input))
e = torch.matmul(a_input, self.a).squeeze(3)
else:
Wx = self.lin(x)
a_input = self._make_attention_input(Wx)
e = self.leakyrelu(torch.matmul(a_input, self.a)).squeeze(3)
if self.use_bias:
e += self.bias
attention = torch.softmax(e, dim=2)
attention = torch.dropout(attention, self.dropout, train=self.training)
h = self.sigmoid(torch.matmul(attention, x))
return h.permute(0, 2, 1)
def _make_attention_input(self, v):
"""Preparing the feature attention mechanism.
Creating matrix with all possible combinations of concatenations of node.
Each node consists of all values of that node within the window
v1 || v1,
...
v1 || vK,
v2 || v1,
...
v2 || vK,
...
...
vK || v1,
...
vK || vK,
"""
K = self.num_nodes
blocks_repeating = v.repeat_interleave(K, dim=1)
blocks_alternating = v.repeat(1, K, 1)
combined = torch.cat((blocks_repeating, blocks_alternating), dim=2)
if self.use_gatv2:
return combined.view(v.size(0), K, K, 2 * self.window_size)
else:
return combined.view(v.size(0), K, K, 2 * self.embed_dim)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'window_size': 4, 'dropout': 0.5, 'alpha': 4}
]
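# Minimal usage sketch (illustrative; not part of the reference code). The seed,
# the zero-initialized bias, and the .eval() call are assumptions added so the
# forward pass is deterministic and dropout is a no-op.
def _demo_feature_attention():
    torch.manual_seed(0)
    layer = FeatureAttentionLayer(n_features=4, window_size=4, dropout=0.5, alpha=4)
    nn.init.zeros_(layer.bias)  # bias is created with torch.empty, so define it
    layer.eval()
    x = get_inputs()[0]  # (batch, window_size, n_features) = (4, 4, 4)
    h = layer(x)
    assert h.shape == (4, 4, 4)
    return h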
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
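# Fused _make_attention_input: repeat_interleave + repeat + cat collapsed into a
# single gather that writes the (B, K*K, 2*window_size) pairwise tensor.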
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x2 + x1 // 4), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (-4 + x0) + 16 * x2 + x1 % 4), tmp6 &
xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
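# Adds the linear-layer bias, applies LeakyReLU (negative slope baked in as 4.0),
# and stores both the >0 mask (saved for backward) and the activated values.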
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 4.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
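# First softmax pass over e + bias: computes the row-wise max and the sum of
# exponentials used to normalize the attention logits.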
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1 + 16 * ((1 + 4 * x0) //
16)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x1 + 16 * ((1 + 2 * x0) //
8)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1 + 16 * ((3 + 4 * x0) //
16)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
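# Second softmax pass: normalizes each logit as exp(e + bias - max) / sum.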
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex % 16
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + x4, tmp7, xmask)
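# Applies sigmoid in place and stores s * (1 - s), the sigmoid derivative saved
# for the backward pass.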
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_4(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp1 * tmp3
tl.store(in_out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 8), (8, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (8, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_3, buf2,
buf3, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
primals_4, out=buf4)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf4, primals_5, buf5, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf4, primals_5, buf5, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_5
buf8 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf7, reinterpret_tensor(primals_1, (4, 4, 4), (
16, 1, 4), 0), out=buf8)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_sigmoid_sigmoid_backward_4[grid(64)](buf9, buf10,
64, XBLOCK=64, num_warps=1, num_stages=1)
return reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf2, buf7, buf10, primals_1, reinterpret_tensor(buf3, (8, 64),
(1, 8), 0), reinterpret_tensor(primals_4, (1, 8), (1, 1), 0)
class FeatureAttentionLayerNew(nn.Module):
"""Single Graph Feature/Spatial Attention Layer
:param n_features: Number of input features/nodes
:param window_size: length of the input sequence
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope used in the leaky relu activation function
:param embed_dim: embedding dimension (output dimension of linear transformation)
:param use_gatv2: whether to use the modified attention mechanism of GATv2 instead of standard GAT
:param use_bias: whether to include a bias term in the attention layer
"""
def __init__(self, n_features, window_size, dropout, alpha, embed_dim=
None, use_gatv2=True, use_bias=True):
super(FeatureAttentionLayerNew, self).__init__()
self.n_features = n_features
self.window_size = window_size
self.dropout = dropout
self.embed_dim = embed_dim if embed_dim is not None else window_size
self.use_gatv2 = use_gatv2
self.num_nodes = n_features
self.use_bias = use_bias
if self.use_gatv2:
self.embed_dim *= 2
lin_input_dim = 2 * window_size
a_input_dim = self.embed_dim
else:
lin_input_dim = window_size
a_input_dim = 2 * self.embed_dim
self.lin = nn.Linear(lin_input_dim, self.embed_dim)
self.a = nn.Parameter(torch.empty((a_input_dim, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
if self.use_bias:
self.bias = nn.Parameter(torch.empty(n_features, n_features))
self.leakyrelu = nn.LeakyReLU(alpha)
self.sigmoid = nn.Sigmoid()
def _make_attention_input(self, v):
"""Preparing the feature attention mechanism.
Creating matrix with all possible combinations of concatenations of node.
Each node consists of all values of that node within the window
v1 || v1,
...
v1 || vK,
v2 || v1,
...
v2 || vK,
...
...
vK || v1,
...
vK || vK,
"""
K = self.num_nodes
blocks_repeating = v.repeat_interleave(K, dim=1)
blocks_alternating = v.repeat(1, K, 1)
combined = torch.cat((blocks_repeating, blocks_alternating), dim=2)
if self.use_gatv2:
return combined.view(v.size(0), K, K, 2 * self.window_size)
else:
return combined.view(v.size(0), K, K, 2 * self.embed_dim)
def forward(self, input_0):
primals_4 = self.a
primals_5 = self.bias
primals_2 = self.lin.weight
primals_3 = self.lin.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
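# Consistency sketch (an assumption, not from the source repo): with shared
# weights and eval mode -- the traced graph above contains no dropout op -- the
# compiled module should match the eager FeatureAttentionLayer shown earlier.
def _check_compiled_matches_eager():
    if not torch.cuda.is_available():
        return
    eager = FeatureAttentionLayer(4, 4, dropout=0.5, alpha=4).cuda().eval()
    nn.init.zeros_(eager.bias)  # bias starts as torch.empty, so give it a value
    compiled = FeatureAttentionLayerNew(4, 4, dropout=0.5, alpha=4).cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)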
| kj21choi/LATAD | FeatureAttentionLayer | false | 7,043 | [
"MIT"
] | 1 | 80d91e0f251ad0225342ee30e2461a39fa9cca97 | https://github.com/kj21choi/LATAD/tree/80d91e0f251ad0225342ee30e2461a39fa9cca97 | import torch
from torch import nn
class Model(nn.Module):
"""Single Graph Feature/Spatial Attention Layer
:param n_features: Number of input features/nodes
:param window_size: length of the input sequence
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope used in the leaky relu activation function
:param embed_dim: embedding dimension (output dimension of linear transformation)
:param use_gatv2: whether to use the modified attention mechanism of GATv2 instead of standard GAT
:param use_bias: whether to include a bias term in the attention layer
"""
def __init__(self, n_features, window_size, dropout, alpha, embed_dim=
None, use_gatv2=True, use_bias=True):
super().__init__()
self.n_features = n_features
self.window_size = window_size
self.dropout = dropout
self.embed_dim = embed_dim if embed_dim is not None else window_size
self.use_gatv2 = use_gatv2
self.num_nodes = n_features
self.use_bias = use_bias
if self.use_gatv2:
self.embed_dim *= 2
lin_input_dim = 2 * window_size
a_input_dim = self.embed_dim
else:
lin_input_dim = window_size
a_input_dim = 2 * self.embed_dim
self.lin = nn.Linear(lin_input_dim, self.embed_dim)
self.a = nn.Parameter(torch.empty((a_input_dim, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
if self.use_bias:
self.bias = nn.Parameter(torch.empty(n_features, n_features))
self.leakyrelu = nn.LeakyReLU(alpha)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = x.permute(0, 2, 1)
if self.use_gatv2:
a_input = self._make_attention_input(x)
a_input = self.leakyrelu(self.lin(a_input))
e = torch.matmul(a_input, self.a).squeeze(3)
else:
Wx = self.lin(x)
a_input = self._make_attention_input(Wx)
e = self.leakyrelu(torch.matmul(a_input, self.a)).squeeze(3)
if self.use_bias:
e += self.bias
attention = torch.softmax(e, dim=2)
attention = torch.dropout(attention, self.dropout, train=self.training)
h = self.sigmoid(torch.matmul(attention, x))
return h.permute(0, 2, 1)
def _make_attention_input(self, v):
"""Preparing the feature attention mechanism.
Creating matrix with all possible combinations of concatenations of node.
Each node consists of all values of that node within the window
v1 || v1,
...
v1 || vK,
v2 || v1,
...
v2 || vK,
...
...
vK || v1,
...
vK || vK,
"""
K = self.num_nodes
blocks_repeating = v.repeat_interleave(K, dim=1)
blocks_alternating = v.repeat(1, K, 1)
combined = torch.cat((blocks_repeating, blocks_alternating), dim=2)
if self.use_gatv2:
return combined.view(v.size(0), K, K, 2 * self.window_size)
else:
return combined.view(v.size(0), K, K, 2 * self.embed_dim)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'window_size': 4, 'dropout': 0.5, 'alpha': 4}
]
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/y6/cy6gw5ey7z3gwvyiz7vikmx2bo2jerjfzwj5q32cdzohhmrmzqnf.py
# Topologically Sorted Source Nodes: [conv2d, att], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# att => gt, mul, where
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dg/cdg6c37dvuiwmghein2piibcgrzrhtxl5brce6pwncltfel7dtrh.py
# Topologically Sorted Source Nodes: [att_max, att_avg], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
# Source node to ATen node mapping:
# att_avg => avg_pool2d
# att_max => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where, [3, 3], [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_avg_pool2d_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x5 = (xindex // 32)
x3 = (xindex // 65536)
x6 = xindex % 65536
x7 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-65) + (2*x0) + (128*x5)), tmp10, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-64) + (2*x0) + (128*x5)), tmp16, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-63) + (2*x0) + (128*x5)), tmp23, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (128*x5)), tmp30, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (128*x5)), tmp33, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x5)), tmp36, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (63 + (2*x0) + (128*x5)), tmp43, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x5)), tmp46, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x5)), tmp49, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.load(in_ptr0 + ((-65) + (2*x0) + (128*x5)), tmp10, eviction_policy='evict_last', other=0.0)
tmp78 = tl.load(in_ptr0 + ((-64) + (2*x0) + (128*x5)), tmp16, eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 + tmp77
tmp80 = tl.load(in_ptr0 + ((-63) + (2*x0) + (128*x5)), tmp23, eviction_policy='evict_last', other=0.0)
tmp81 = tmp80 + tmp79
tmp82 = tl.load(in_ptr0 + ((-1) + (2*x0) + (128*x5)), tmp30, eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 + tmp81
tmp84 = tl.load(in_ptr0 + ((2*x0) + (128*x5)), tmp33, eviction_policy='evict_last', other=0.0)
tmp85 = tmp84 + tmp83
tmp86 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x5)), tmp36, eviction_policy='evict_last', other=0.0)
tmp87 = tmp86 + tmp85
tmp88 = tl.load(in_ptr0 + (63 + (2*x0) + (128*x5)), tmp43, eviction_policy='evict_last', other=0.0)
tmp89 = tmp88 + tmp87
tmp90 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x5)), tmp46, eviction_policy='evict_last', other=0.0)
tmp91 = tmp90 + tmp89
tmp92 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x5)), tmp49, eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 + tmp91
tmp94 = 1 + ((-2)*x0) + ((-2)*x1) + (((65) * ((65) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (65)))*((65) * ((65) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (65)))) + ((-2)*x0*((65) * ((65) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (65)))) + ((-2)*x1*((65) * ((65) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (65)))) + (4*x0*x1) + ((65) * ((65) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (65))) + ((65) * ((65) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (65)))
tmp95 = tmp93 / tmp94
tl.store(out_ptr0 + (x6 + (131072*x3)), tmp51, None)
tl.store(out_ptr1 + (x7), tmp76, None)
tl.store(out_ptr2 + (x6 + (131072*x3)), tmp95, None)
''', device_str='cuda')
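# Eager-mode reference for the fused kernel above (a sketch inferred from the
# graph fragments, not taken from the source repo): both pooled maps come from
# the same 3x3 / stride-2 / pad-1 window over the (4, 64, 64, 64) activation.
import torch.nn.functional as F
def _pool_reference(att: torch.Tensor):
    att_max = F.max_pool2d(att, kernel_size=3, stride=2, padding=1)  # (N, C, 32, 32)
    att_avg = F.avg_pool2d(att, kernel_size=3, stride=2, padding=1)  # (N, C, 32, 32)
    return att_max, att_avg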
# kernel path: runs/run_shard_4/inductor_cache/uv/cuvilc5peipe3mxijp5fxg5mdd2pzopugi3t2phky43tmf6wdrng.py
# Topologically Sorted Source Nodes: [conv2d_1, att_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# att_1 => gt_1, mul_1, where_1
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.1), kwargs = {})
# %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wi/cwitrieuob7aemxxbesnp2ltobphikbp4s6bc2nur5ntkgyvz573.py
# Topologically Sorted Source Nodes: [att_max_1, att_avg_1], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
# Source node to ATen node mapping:
# att_avg_1 => avg_pool2d_1
# att_max_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_2, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_2, [3, 3], [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_avg_pool2d_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x5 = (xindex // 16)
x3 = (xindex // 16384)
x6 = xindex % 16384
x7 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-33) + (2*x0) + (64*x5)), tmp10, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-32) + (2*x0) + (64*x5)), tmp16, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-31) + (2*x0) + (64*x5)), tmp23, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (64*x5)), tmp30, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (64*x5)), tmp33, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x5)), tmp36, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + (2*x0) + (64*x5)), tmp43, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x5)), tmp46, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x5)), tmp49, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.load(in_ptr0 + ((-33) + (2*x0) + (64*x5)), tmp10, eviction_policy='evict_last', other=0.0)
tmp78 = tl.load(in_ptr0 + ((-32) + (2*x0) + (64*x5)), tmp16, eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 + tmp77
tmp80 = tl.load(in_ptr0 + ((-31) + (2*x0) + (64*x5)), tmp23, eviction_policy='evict_last', other=0.0)
tmp81 = tmp80 + tmp79
tmp82 = tl.load(in_ptr0 + ((-1) + (2*x0) + (64*x5)), tmp30, eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 + tmp81
tmp84 = tl.load(in_ptr0 + ((2*x0) + (64*x5)), tmp33, eviction_policy='evict_last', other=0.0)
tmp85 = tmp84 + tmp83
tmp86 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x5)), tmp36, eviction_policy='evict_last', other=0.0)
tmp87 = tmp86 + tmp85
tmp88 = tl.load(in_ptr0 + (31 + (2*x0) + (64*x5)), tmp43, eviction_policy='evict_last', other=0.0)
tmp89 = tmp88 + tmp87
tmp90 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x5)), tmp46, eviction_policy='evict_last', other=0.0)
tmp91 = tmp90 + tmp89
tmp92 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x5)), tmp49, eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 + tmp91
tmp94 = 1 + ((-2)*x0) + ((-2)*x1) + (((33) * ((33) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (33)))*((33) * ((33) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (33)))) + ((-2)*x0*((33) * ((33) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (33)))) + ((-2)*x1*((33) * ((33) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (33)))) + (4*x0*x1) + ((33) * ((33) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (33))) + ((33) * ((33) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (33)))
tmp95 = tmp93 / tmp94
tl.store(out_ptr0 + (x6 + (32768*x3)), tmp51, None)
tl.store(out_ptr1 + (x7), tmp76, None)
tl.store(out_ptr2 + (x6 + (32768*x3)), tmp95, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ps/cpsewqbicuoqxf2x264fi3zf2otaraslkpf3pudwqti4zubc6nqy.py
# Topologically Sorted Source Nodes: [conv2d_3, att_L_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# att_L_1 => gt_3, mul_3, where_3
# conv2d_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.1), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4y/c4yi677k2khitgtsgyfed4k33ti5roqfeskmbxbx6lxxqvdbx2bm.py
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# att_L_3 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_5 = async_compile.triton('triton_poi_fused__to_copy_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/md/cmdzb3g7ta6wjq6r3fstpbzeaul3ol5l3xra37jbj2vydlmdmvsb.py
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# att_L_3 => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 15), kwargs = {})
triton_poi_fused_add_clamp_6 = async_compile.triton('triton_poi_fused_add_clamp_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bo/cbopvl3gqicrov7xowxovrkp6amyvpvxr7d7id6h2qyxy5qpmrr2.py
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# att_L_3 => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_5, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
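# Eager-mode reference (a sketch inferred from the index/weight kernels above):
# kernels 5-7 precompute the gather indices and lerp weights of a 2x bilinear
# upsample with align_corners=False, taking the 16x16 attention map to 32x32.
def _upsample_reference(att_L: torch.Tensor) -> torch.Tensor:
    import torch.nn.functional as F
    return F.interpolate(att_L, scale_factor=2, mode='bilinear', align_corners=False)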
# kernel path: runs/run_shard_4/inductor_cache/yn/cyn5bq4rjjfsj27txhrmibg4wg4pvzdzmkvocmaf3vrwrlw73gbe.py
# Topologically Sorted Source Nodes: [conv2d_4, att_L_2, att_L_3, conv2d_5, att_2, att_3], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# att_2 => gt_5, mul_10, where_5
# att_3 => add_7
# att_L_2 => gt_4, mul_4, where_4
# att_L_3 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_7, mul_8, mul_9, sub_3, sub_4, sub_6
# conv2d_4 => convolution_4
# conv2d_5 => convolution_5
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {})
# %where_4 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_4, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_4, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_4, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_4, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_7), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_8), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_9), kwargs = {})
# %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.1), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_10), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %add_6), kwargs = {})
# %gt_10 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_5, 0), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_8 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*i1', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x5 = (xindex // 1024)
x2 = (xindex // 1024) % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr8 + (x6), None)
tmp51 = tl.load(in_ptr9 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (16*tmp4) + (256*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + (16*tmp4) + (256*x5)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + (16*tmp33) + (256*x5)), None, eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + (16*tmp33) + (256*x5)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp52 = tmp50 + tmp51
tmp53 = tmp52 > tmp12
tmp54 = tmp52 * tmp14
tmp55 = tl.where(tmp53, tmp52, tmp54)
tmp56 = tmp29 + tmp49
tmp57 = tmp55 + tmp56
tmp58 = tmp55 > tmp12
tl.store(in_out_ptr0 + (x6), tmp57, None)
tl.store(out_ptr1 + (x6), tmp58, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gw/cgw3icbwzxdw6oxtxrkk3tzxsbo7tydhmh4cam2x4uk4up2ojyzd.py
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# att_5 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_9 = async_compile.triton('triton_poi_fused__to_copy_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_9(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
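# Sketch of the index math the kernel above materializes: assuming it mirrors
# F.interpolate with align_corners=False and scale_factor=2, each output
# row/column x maps to source coordinate (x + 0.5) * 0.5 - 0.5, clamped at 0,
# and the kernel stores floor() of that coordinate as the left/top neighbour.
def _bilinear_src_index_sketch(x, scale=0.5):
    # x is an output pixel coordinate; returns the left/top source index.
    src = max((x + 0.5) * scale - 0.5, 0.0)
    return int(src)  # int() truncates, which equals floor for src >= 0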
# kernel path: runs/run_shard_4/inductor_cache/va/cvaqcdhhxoyrx4bu7qkswnyrg2mckxp3t7jdg7nh2lifnxa6yra4.py
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# att_5 => add_9, clamp_max_4
# Graph fragment:
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_9, 31), kwargs = {})
triton_poi_fused_add_clamp_10 = async_compile.triton('triton_poi_fused_add_clamp_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_10(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/m3/cm3mwdfktbrt6jdiqjt22dmzr7heqeyttrvfx6vci4crm7y2eab7.py
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# att_5 => add_8, clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_12, sub_7, sub_9
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 0.5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_12, 0.5), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
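# Companion sketch for the two helper kernels above (an illustration, not
# generated code): the right/bottom neighbour is the floor index plus one,
# clamped to the last valid source index (31 here, since the source map is
# 32 pixels wide), and the lerp weight is the fractional part clamped to [0, 1].
def _bilinear_neighbour_and_weight_sketch(x, last=31, scale=0.5):
    src = max((x + 0.5) * scale - 0.5, 0.0)
    i0 = int(src)
    i1 = min(i0 + 1, last)
    w = min(max(src - i0, 0.0), 1.0)
    return i0, i1, w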
# kernel path: runs/run_shard_4/inductor_cache/bs/cbs5yb4s45t3a7ow7apqqd7nqvtlsckjscrw2mwvyey243pdjdpv.py
# Topologically Sorted Source Nodes: [conv2d_6, att_4, att_5], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# att_4 => gt_6, mul_11, where_6
# att_5 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_12, add_13, add_14, mul_14, mul_15, mul_16, sub_10, sub_11, sub_13
# conv2d_6 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_7, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.1), kwargs = {})
# %where_6 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_11), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_6, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_6, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_6, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_6, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_14), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_15), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_16), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_12 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x6 = (xindex // 4096)
x2 = (xindex // 4096) % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + (32*tmp33) + (1024*x6)), None, eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + (32*tmp33) + (1024*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp50 = tmp29 + tmp49
tl.store(in_out_ptr0 + (x4), tmp50, None)
''', device_str='cuda')
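# A compact PyTorch view of what the fused kernel above computes -- a sketch
# written for clarity, not generated code: bias-add plus leaky_relu(0.1) on
# the 32x32 attention map, then a bilinear 2x upsample that the kernel
# gathers through the precomputed index/weight tables.
def _fused_upsample_sketch(conv_out, bias):
    import torch.nn.functional as F
    att = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1)
    return F.interpolate(att, scale_factor=2, mode='bilinear', align_corners=False)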
# kernel path: runs/run_shard_4/inductor_cache/hc/chciwv3fhdg2mb3dqvrfhrfjzwg4gad6tze4ucz7d6ksvs4sr2ab.py
# Topologically Sorted Source Nodes: [att_6], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# att_6 => convolution_7
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_14, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
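# Plain-PyTorch sketch of triton_poi_fused_convolution_13: the extern conv
# above runs with bias=None, and this kernel folds the per-channel bias in
# afterwards via a broadcast add.
def _add_bias_sketch(conv_out, bias):
    # conv_out: (N, C, H, W), bias: (C,)
    return conv_out + bias.view(1, -1, 1, 1)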
# kernel path: runs/run_shard_4/inductor_cache/bn/cbn3weotvzzbxtlrwmrpyan7fjhjwc3yf62pl6gezlg2u52hjyeq.py
# Topologically Sorted Source Nodes: [att_add, att_7, mul, mul_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# att_7 => sigmoid
# att_add => convolution_9
# mul => mul_18
# mul_1 => mul_19
# out => add_15
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_7, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_7,), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sigmoid), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, 2), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %convolution_9), kwargs = {})
triton_poi_fused_add_convolution_mul_sigmoid_14 = async_compile.triton('triton_poi_fused_add_convolution_mul_sigmoid_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sigmoid_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_sigmoid_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp6 = tl.load(in_out_ptr0 + (x3), None)
tmp7 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp8 = tmp6 + tmp7
tmp9 = tmp5 + tmp8
tl.store(in_out_ptr0 + (x3), tmp9, None)
''', device_str='cuda')
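# Sketch of the epilogue this kernel fuses, matching the source model's final
# lines: out = x * sigmoid(att) * 2 + att_add, with the att_add convolution's
# bias folded into the same pass.
def _attention_epilogue_sketch(x, att, att_add, att_add_bias):
    return x * torch.sigmoid(att) * 2 + (att_add + att_add_bias.view(1, -1, 1, 1))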
# kernel path: runs/run_shard_4/inductor_cache/aj/caj3tpd3emvbv3jknru7lgyv3lxor3fhvyv2b2yh6rwc6e3iaomw.py
# Topologically Sorted Source Nodes: [conv2d_6, att_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# att_4 => gt_6, mul_11, where_6
# conv2d_6 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_7, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.1), kwargs = {})
# %where_6 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_11), kwargs = {})
# %gt_9 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_6, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_15 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
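# The boolean map stored above is all leaky_relu needs for its backward pass:
# where the forward output was positive the gradient passes through,
# elsewhere it is scaled by the negative slope.  A sketch of that use:
def _leaky_relu_backward_sketch(grad_out, mask, slope=0.1):
    return torch.where(mask, grad_out, grad_out * slope)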
# kernel path: runs/run_shard_4/inductor_cache/ke/ckedmbgtvjlsai65apguxb4fs3ztxpgbpud54kw22u7xq2cikzyg.py
# Topologically Sorted Source Nodes: [conv2d_4, att_L_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# att_L_2 => gt_4, mul_4, where_4
# conv2d_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {})
# %where_4 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
# %gt_11 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_4, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_16 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args
args.clear()
assert_size_stride(primals_1, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64, ), (1, ))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_19, (64, ), (1, ))
assert_size_stride(primals_20, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_21, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, att], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
buf5 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
buf2 = reinterpret_tensor(buf5, (4, 64, 32, 32), (131072, 1024, 32, 1), 0) # alias
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
buf4 = reinterpret_tensor(buf5, (4, 64, 32, 32), (131072, 1024, 32, 1), 65536) # alias
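    # Note: buf2 and buf4 alias disjoint channel slices of buf5 (offsets 0 and
    # 65536 of a 131072-stride buffer), so the fused pooling kernel below writes
    # its max and avg results straight into the concatenated tensor and the
    # torch.cat from the source model needs no extra copy.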
# Topologically Sorted Source Nodes: [att_max, att_avg], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_1.run(buf1, buf2, buf3, buf4, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, att_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_5, 262144, grid=grid(262144), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, att_L], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf9, primals_7, 262144, grid=grid(262144), stream=stream0)
del primals_7
buf13 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf10 = reinterpret_tensor(buf13, (4, 64, 16, 16), (32768, 256, 16, 1), 0) # alias
buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8)
buf12 = reinterpret_tensor(buf13, (4, 64, 16, 16), (32768, 256, 16, 1), 16384) # alias
# Topologically Sorted Source Nodes: [att_max_1, att_avg_1], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_3.run(buf9, buf10, buf11, buf12, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, att_L_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf15, primals_9, 65536, grid=grid(65536), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 16, 16), (16384, 256, 16, 1))
buf17 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_5.run(buf17, 32, grid=grid(32), stream=stream0)
buf18 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_6.run(buf18, 32, grid=grid(32), stream=stream0)
buf19 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_5.run(buf19, 32, grid=grid(32), stream=stream0)
buf20 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_6.run(buf20, 32, grid=grid(32), stream=stream0)
buf21 = empty_strided_cuda((32, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7.run(buf21, 32, grid=grid(32), stream=stream0)
buf23 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7.run(buf23, 32, grid=grid(32), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf7, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf24 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf26 = buf24; del buf24 # reuse
buf44 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, att_L_2, att_L_3, conv2d_5, att_2, att_3], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add, aten.leaky_relu_backward]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_8.run(buf26, buf17, buf19, buf16, primals_11, buf20, buf21, buf18, buf23, buf25, primals_13, buf44, 262144, grid=grid(262144), stream=stream0)
del buf25
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf28 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_9.run(buf28, 64, grid=grid(64), stream=stream0)
buf29 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_10.run(buf29, 64, grid=grid(64), stream=stream0)
buf30 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_9.run(buf30, 64, grid=grid(64), stream=stream0)
buf31 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_10.run(buf31, 64, grid=grid(64), stream=stream0)
buf32 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11.run(buf32, 64, grid=grid(64), stream=stream0)
buf34 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11.run(buf34, 64, grid=grid(64), stream=stream0)
buf35 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf36 = buf35; del buf35 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, att_4, att_5], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_12.run(buf36, buf28, buf30, buf27, primals_15, buf31, buf32, buf29, buf34, 1048576, grid=grid(1048576), stream=stream0)
# Topologically Sorted Source Nodes: [att_6], Original ATen: [aten.convolution]
buf37 = extern_kernels.convolution(buf36, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf38 = buf37; del buf37 # reuse
# Topologically Sorted Source Nodes: [att_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf38, primals_17, 1048576, grid=grid(1048576), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf38, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf40 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [conv2d_8, leaky_relu_7], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf40, primals_19, 1048576, grid=grid(1048576), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [att_add], Original ATen: [aten.convolution]
buf41 = extern_kernels.convolution(buf40, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf42 = buf41; del buf41 # reuse
# Topologically Sorted Source Nodes: [att_add, att_7, mul, mul_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_sigmoid_14.run(buf42, primals_3, buf38, primals_21, 1048576, grid=grid(1048576), stream=stream0)
del primals_21
buf43 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_6, att_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_15.run(buf27, primals_15, buf43, 262144, grid=grid(262144), stream=stream0)
del buf27
del primals_15
buf45 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, att_L_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_16.run(buf16, primals_11, buf45, 65536, grid=grid(65536), stream=stream0)
del buf16
del primals_11
return (buf42, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf18, buf19, buf20, buf21, buf23, buf26, buf28, buf29, buf30, buf31, buf32, buf34, buf36, buf38, buf40, buf43, buf44, buf45, )
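# The tuple returned by call() is what the compiled graph keeps alive for the
# backward pass: parameter tensors, intermediate activations, the bilinear
# index/weight tables (buf17-buf34), and the boolean leaky_relu masks
# buf43/buf44/buf45 produced by the *_backward kernels above.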
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, nf=64):
super(Attention, self).__init__()
self.sAtt_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, x):
att = self.lrelu(self.sAtt_1(x))
att_max = self.max_pool(att)
att_avg = self.avg_pool(att)
att = self.lrelu(self.sAtt_2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L1(att))
att_max = self.max_pool(att_L)
att_avg = self.avg_pool(att_L)
att_L = self.lrelu(self.sAtt_L2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L3(att_L))
att_L = F.interpolate(att_L, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.lrelu(self.sAtt_3(att))
att = att + att_L
att = self.lrelu(self.sAtt_4(att))
att = F.interpolate(att, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.sAtt_5(att)
att_add = self.sAtt_add_2(self.lrelu(self.sAtt_add_1(att)))
att = torch.sigmoid(att)
out = x * att * 2 + att_add
return out
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
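# Minimal smoke test for the Attention module above (not part of the original
# source; shapes mirror get_inputs()):
if __name__ == '__main__':
    m = Attention(nf=64)
    x = torch.rand(4, 64, 64, 64)
    y = m(x)
    assert y.shape == x.shape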
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_1(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x5 = xindex // 32
x3 = xindex // 65536
x6 = xindex % 65536
x7 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-65 + 2 * x0 + 128 * x5), tmp10,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-64 + 2 * x0 + 128 * x5), tmp16,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-63 + 2 * x0 + 128 * x5), tmp23,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 128 * x5), tmp30,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 128 * x5), tmp33, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x5), tmp36,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (63 + 2 * x0 + 128 * x5), tmp43,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x5), tmp46,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x5), tmp49,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.load(in_ptr0 + (-65 + 2 * x0 + 128 * x5), tmp10,
eviction_policy='evict_last', other=0.0)
tmp78 = tl.load(in_ptr0 + (-64 + 2 * x0 + 128 * x5), tmp16,
eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 + tmp77
tmp80 = tl.load(in_ptr0 + (-63 + 2 * x0 + 128 * x5), tmp23,
eviction_policy='evict_last', other=0.0)
tmp81 = tmp80 + tmp79
tmp82 = tl.load(in_ptr0 + (-1 + 2 * x0 + 128 * x5), tmp30,
eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 + tmp81
tmp84 = tl.load(in_ptr0 + (2 * x0 + 128 * x5), tmp33, eviction_policy=
'evict_last', other=0.0)
tmp85 = tmp84 + tmp83
tmp86 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x5), tmp36,
eviction_policy='evict_last', other=0.0)
tmp87 = tmp86 + tmp85
tmp88 = tl.load(in_ptr0 + (63 + 2 * x0 + 128 * x5), tmp43,
eviction_policy='evict_last', other=0.0)
tmp89 = tmp88 + tmp87
tmp90 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x5), tmp46,
eviction_policy='evict_last', other=0.0)
tmp91 = tmp90 + tmp89
tmp92 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x5), tmp49,
eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 + tmp91
tmp94 = 1 + -2 * x0 + -2 * x1 + (65 * (65 <= 2 + 2 * x0) + (2 + 2 * x0) *
(2 + 2 * x0 < 65)) * (65 * (65 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 +
2 * x1 < 65)) + -2 * x0 * (65 * (65 <= 2 + 2 * x1) + (2 + 2 * x1) *
(2 + 2 * x1 < 65)) + -2 * x1 * (65 * (65 <= 2 + 2 * x0) + (2 + 2 *
x0) * (2 + 2 * x0 < 65)) + 4 * x0 * x1 + (65 * (65 <= 2 + 2 * x0) +
(2 + 2 * x0) * (2 + 2 * x0 < 65)) + (65 * (65 <= 2 + 2 * x1) + (2 +
2 * x1) * (2 + 2 * x1 < 65))
tmp95 = tmp93 / tmp94
tl.store(out_ptr0 + (x6 + 131072 * x3), tmp51, None)
tl.store(out_ptr1 + x7, tmp76, None)
tl.store(out_ptr2 + (x6 + 131072 * x3), tmp95, None)
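# The long tmp94 divisor expression above factors into a clamped pooling-window
# count; a sketch of the equivalent computation (equivalence assumed by
# algebraic factoring, for the 3x3, stride-2, pad-1 average pool on a
# 64-wide map):
def _avg_pool_divisor_sketch(x0, x1, size=64):
    wend = min(2 * x0 + 2, size + 1)
    hend = min(2 * x1 + 2, size + 1)
    return (wend - (2 * x0 - 1)) * (hend - (2 * x1 - 1))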
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_3(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x5 = xindex // 16
x3 = xindex // 16384
x6 = xindex % 16384
x7 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-33 + 2 * x0 + 64 * x5), tmp10,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-32 + 2 * x0 + 64 * x5), tmp16,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-31 + 2 * x0 + 64 * x5), tmp23,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 64 * x5), tmp30,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 64 * x5), tmp33, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x5), tmp36,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + 2 * x0 + 64 * x5), tmp43,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x5), tmp46,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x5), tmp49,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.load(in_ptr0 + (-33 + 2 * x0 + 64 * x5), tmp10,
eviction_policy='evict_last', other=0.0)
tmp78 = tl.load(in_ptr0 + (-32 + 2 * x0 + 64 * x5), tmp16,
eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 + tmp77
tmp80 = tl.load(in_ptr0 + (-31 + 2 * x0 + 64 * x5), tmp23,
eviction_policy='evict_last', other=0.0)
tmp81 = tmp80 + tmp79
tmp82 = tl.load(in_ptr0 + (-1 + 2 * x0 + 64 * x5), tmp30,
eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 + tmp81
tmp84 = tl.load(in_ptr0 + (2 * x0 + 64 * x5), tmp33, eviction_policy=
'evict_last', other=0.0)
tmp85 = tmp84 + tmp83
tmp86 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x5), tmp36,
eviction_policy='evict_last', other=0.0)
tmp87 = tmp86 + tmp85
tmp88 = tl.load(in_ptr0 + (31 + 2 * x0 + 64 * x5), tmp43,
eviction_policy='evict_last', other=0.0)
tmp89 = tmp88 + tmp87
tmp90 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x5), tmp46,
eviction_policy='evict_last', other=0.0)
tmp91 = tmp90 + tmp89
tmp92 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x5), tmp49,
eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 + tmp91
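    # tmp94 below is the area of the 3x3 average-pooling window clipped to the
    # padded input extent (32 + 1 padding => bound 33); it is the divisor for
    # the fused AvgPool2d(3, stride=2, padding=1) whose window sum was
    # accumulated into tmp93 above.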
tmp94 = 1 + -2 * x0 + -2 * x1 + (33 * (33 <= 2 + 2 * x0) + (2 + 2 * x0) *
(2 + 2 * x0 < 33)) * (33 * (33 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 +
2 * x1 < 33)) + -2 * x0 * (33 * (33 <= 2 + 2 * x1) + (2 + 2 * x1) *
(2 + 2 * x1 < 33)) + -2 * x1 * (33 * (33 <= 2 + 2 * x0) + (2 + 2 *
x0) * (2 + 2 * x0 < 33)) + 4 * x0 * x1 + (33 * (33 <= 2 + 2 * x0) +
(2 + 2 * x0) * (2 + 2 * x0 < 33)) + (33 * (33 <= 2 + 2 * x1) + (2 +
2 * x1) * (2 + 2 * x1 < 33))
tmp95 = tmp93 / tmp94
tl.store(out_ptr0 + (x6 + 32768 * x3), tmp51, None)
tl.store(out_ptr1 + x7, tmp76, None)
tl.store(out_ptr2 + (x6 + 32768 * x3), tmp95, None)
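# Fused in-place bias-add + LeakyReLU(negative_slope=0.1) on the output of a
# 3x3 convolution at the 16x16 stage (64 channels).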
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
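# Floor of the half-pixel source coordinate for 2x bilinear upsampling
# (align_corners=False) from a 16-wide grid to a 32-wide grid.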
@triton.jit
def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
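# Neighbouring source index for the same bilinear interpolation: floor index
# plus one, clamped to the last valid input position (15).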
@triton.jit
def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
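# Fractional bilinear weight per output position: distance of the half-pixel
# source coordinate from its floor, clamped to [0, 1].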
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
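# Fused 2x bilinear upsample of the LeakyReLU'd 16x16 feature map (gathering
# the four neighbours via the precomputed index/weight tensors), added to the
# LeakyReLU'd 32x32 branch; also stores the boolean (output > 0) mask that the
# LeakyReLU backward pass of the 32x32 activation needs.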
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_8(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x5 = xindex // 1024
x2 = xindex // 1024 % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr8 + x6, None)
tmp51 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + 16 * tmp4 + 256 * x5), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + 16 * tmp33 + 256 * x5), None,
eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + 16 * tmp33 + 256 * x5), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp52 = tmp50 + tmp51
tmp53 = tmp52 > tmp12
tmp54 = tmp52 * tmp14
tmp55 = tl.where(tmp53, tmp52, tmp54)
tmp56 = tmp29 + tmp49
tmp57 = tmp55 + tmp56
tmp58 = tmp55 > tmp12
tl.store(in_out_ptr0 + x6, tmp57, None)
tl.store(out_ptr1 + x6, tmp58, None)
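# Kernels 9-11 repeat the index/weight computation of kernels 5-7 for the
# second 2x upsample (32 -> 64): xnumel becomes 64 and the clamp bound 31.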
@triton.jit
def triton_poi_fused__to_copy_9(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_10(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
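# Same fused bilinear-upsample pattern as kernel 8, for the 32x32 -> 64x64
# stage, without the extra backward-mask output.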
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_12(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x6 = xindex // 4096
x2 = xindex // 4096 % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + 32 * tmp33 + 1024 * x6), None,
eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + 32 * tmp33 + 1024 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp50 = tmp29 + tmp49
tl.store(in_out_ptr0 + x4, tmp50, None)
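# Plain in-place bias add after the 3x3 convolution at the 64x64 stage.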
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
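# Final attention application, fused with the bias add of the last 1x1
# convolution: out = x * sigmoid(att) * 2 + att_add.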
@triton.jit
def triton_poi_fused_add_convolution_mul_sigmoid_14(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp6 = tl.load(in_out_ptr0 + x3, None)
tmp7 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp8 = tmp6 + tmp7
tmp9 = tmp5 + tmp8
tl.store(in_out_ptr0 + x3, tmp9, None)
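# Recomputes bias-add + LeakyReLU at the 32x32 stage and stores only the
# boolean (output > 0) mask required by the backward pass.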
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
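# Same backward mask as above, for the 16x16 stage.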
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_21, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf1,
primals_2, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf5 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
buf2 = reinterpret_tensor(buf5, (4, 64, 32, 32), (131072, 1024, 32,
1), 0)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
buf4 = reinterpret_tensor(buf5, (4, 64, 32, 32), (131072, 1024, 32,
1), 65536)
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_1[grid(262144)](
buf1, buf2, buf3, buf4, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7,
primals_5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf9,
primals_7, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf10 = reinterpret_tensor(buf13, (4, 64, 16, 16), (32768, 256, 16,
1), 0)
buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.int8)
buf12 = reinterpret_tensor(buf13, (4, 64, 16, 16), (32768, 256, 16,
1), 16384)
        triton_poi_fused_avg_pool2d_max_pool2d_with_indices_3[grid(65536)](
            buf9, buf10, buf11, buf12, 65536, XBLOCK=256, num_warps=4,
            num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_leaky_relu_4[grid(65536)](buf15,
primals_9, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 16, 16), (16384, 256, 16, 1))
buf17 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_5[grid(32)](buf17, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_6[grid(32)](buf18, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_5[grid(32)](buf19, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_add_clamp_6[grid(32)](buf20, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((32,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(32)](buf21,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(32)](buf23,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf25 = extern_kernels.convolution(buf7, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf24 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf26 = buf24
del buf24
buf44 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
        triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_8[
            grid(262144)](buf26, buf17, buf19, buf16, primals_11, buf20,
            buf21, buf18, buf23, buf25, primals_13, buf44, 262144,
            XBLOCK=512, num_warps=8, num_stages=1)
del buf25
del primals_13
buf27 = extern_kernels.convolution(buf26, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf28 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_9[grid(64)](buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_10[grid(64)](buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_9[grid(64)](buf30, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_10[grid(64)](buf31, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf32,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf34,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf35 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf36 = buf35
del buf35
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_12[
grid(1048576)](buf36, buf28, buf30, buf27, primals_15, buf31,
buf32, buf29, buf34, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
buf37 = extern_kernels.convolution(buf36, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf38 = buf37
del buf37
triton_poi_fused_convolution_13[grid(1048576)](buf38, primals_17,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf39 = extern_kernels.convolution(buf38, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf40 = buf39
del buf39
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf40,
primals_19, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf41 = extern_kernels.convolution(buf40, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf42 = buf41
del buf41
triton_poi_fused_add_convolution_mul_sigmoid_14[grid(1048576)](buf42,
primals_3, buf38, primals_21, 1048576, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_21
buf43 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_15[
            grid(262144)](buf27, primals_15, buf43, 262144, XBLOCK=1024,
            num_warps=4, num_stages=1)
del buf27
del primals_15
buf45 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_16[
            grid(65536)](buf16, primals_11, buf45, 65536, XBLOCK=512,
            num_warps=4, num_stages=1)
del buf16
del primals_11
return (buf42, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15,
buf17, buf18, buf19, buf20, buf21, buf23, buf26, buf28, buf29,
buf30, buf31, buf32, buf34, buf36, buf38, buf40, buf43, buf44, buf45)
class AttentionNew(nn.Module):
def __init__(self, nf=64):
super(AttentionNew, self).__init__()
self.sAtt_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, input_0):
primals_1 = self.sAtt_1.weight
primals_2 = self.sAtt_1.bias
primals_4 = self.sAtt_2.weight
primals_5 = self.sAtt_2.bias
primals_10 = self.sAtt_3.weight
primals_7 = self.sAtt_3.bias
primals_6 = self.sAtt_4.weight
primals_9 = self.sAtt_4.bias
primals_12 = self.sAtt_5.weight
primals_11 = self.sAtt_5.bias
primals_14 = self.sAtt_L1.weight
primals_13 = self.sAtt_L1.bias
primals_8 = self.sAtt_L2.weight
primals_15 = self.sAtt_L2.bias
primals_16 = self.sAtt_L3.weight
primals_17 = self.sAtt_L3.bias
primals_18 = self.sAtt_add_1.weight
primals_19 = self.sAtt_add_1.bias
primals_20 = self.sAtt_add_2.weight
primals_21 = self.sAtt_add_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
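# Example usage (a sketch; requires CUDA since call() pins device 0):
#   AttentionNew(nf=64)(torch.rand(4, 64, 64, 64, device='cuda'))
# returns a (4, 64, 64, 64) attention-modulated feature map.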
| juyongjiang/Simple-SR | Attention | false | 7,044 | [
"MIT"
] | 1 | 76820511abc04fbe6e4a79d23c67aee97406d563 | https://github.com/juyongjiang/Simple-SR/tree/76820511abc04fbe6e4a79d23c67aee97406d563 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, nf=64):
super().__init__()
self.sAtt_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, x):
att = self.lrelu(self.sAtt_1(x))
att_max = self.max_pool(att)
att_avg = self.avg_pool(att)
att = self.lrelu(self.sAtt_2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L1(att))
att_max = self.max_pool(att_L)
att_avg = self.avg_pool(att_L)
att_L = self.lrelu(self.sAtt_L2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L3(att_L))
att_L = F.interpolate(att_L, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.lrelu(self.sAtt_3(att))
att = att + att_L
att = self.lrelu(self.sAtt_4(att))
att = F.interpolate(att, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.sAtt_5(att)
att_add = self.sAtt_add_2(self.lrelu(self.sAtt_add_1(att)))
att = torch.sigmoid(att)
out = x * att * 2 + att_add
return out
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return []
|
MSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pt/cpt3g3mneau7r5slrkq52wlrq75djv57emcfuqlk4efiteeas2wr.py
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# pow_1 => pow_1
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
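# Eager-mode equivalent of this fused graph (a reference sketch, not part of
# the generated code): torch.mean(torch.sum((arg0_1 - arg1_1) ** 2, dim=1)).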
triton_per_fused_mean_pow_sub_sum_0 = async_compile.triton('triton_per_fused_mean_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tmp22 = 64.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class MSELoss(nn.Module):
def __init__(self):
        super().__init__()
def forward(self, input, target):
return torch.mean(torch.sum((input - target) ** 2, 1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tmp22 = 64.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1,
1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSELossNew(nn.Module):
def __init__(self):
        super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
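# Example usage (a sketch): MSELossNew()(a, b) with two CUDA tensors of shape
# (4, 4, 4, 4) returns a scalar equal to ((a - b) ** 2).sum(1).mean().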
| klovbe/UnsupervisedDeepLearning-Pytorch | MSELoss | false | 7,045 | [
"MIT"
] | 1 | 35e8e49cd4024179db173f3dab2e6d1a5d037d35 | https://github.com/klovbe/UnsupervisedDeepLearning-Pytorch/tree/35e8e49cd4024179db173f3dab2e6d1a5d037d35 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self):
        super().__init__()
def forward(self, input, target):
return torch.mean(torch.sum((input - target) ** 2, 1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/j4/cj4oxxwi45avylvesxrmgefqefihnfgwupsrwozfy4jc4yhfheyr.py
# Topologically Sorted Source Nodes: [clamp, log, mul, sub, sub_1, clamp_1, log_1, mul_1, add, sum_1, mean, neg], Original ATen: [aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sum, aten.mean, aten.neg]
# Source node to ATen node mapping:
# add => add
# clamp => clamp_min
# clamp_1 => clamp_min_1
# log => log
# log_1 => log_1
# mean => mean
# mul => mul
# mul_1 => mul_1
# neg => neg
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-10), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %log), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 1e-10), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
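# Eager-mode equivalent of this fused graph (a reference sketch), with
# x = arg0_1 (input) and t = arg1_1 (target):
#   -torch.mean(torch.sum(t * torch.log(x.clamp(min=1e-10))
#                         + (1 - t) * torch.log((1 - x).clamp(min=1e-10)), 1))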
triton_per_fused_add_clamp_log_mean_mul_neg_rsub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_log_mean_mul_neg_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_log_mean_mul_neg_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_log_mean_mul_neg_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp25 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp26 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp37 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp38 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = 1e-10
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tl_math.log(tmp3)
tmp5 = tmp0 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp0
tmp8 = tmp6 - tmp1
tmp9 = triton_helpers.maximum(tmp8, tmp2)
tmp10 = tl_math.log(tmp9)
tmp11 = tmp7 * tmp10
tmp12 = tmp5 + tmp11
tmp15 = triton_helpers.maximum(tmp14, tmp2)
tmp16 = tl_math.log(tmp15)
tmp17 = tmp13 * tmp16
tmp18 = tmp6 - tmp13
tmp19 = tmp6 - tmp14
tmp20 = triton_helpers.maximum(tmp19, tmp2)
tmp21 = tl_math.log(tmp20)
tmp22 = tmp18 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = tmp12 + tmp23
tmp27 = triton_helpers.maximum(tmp26, tmp2)
tmp28 = tl_math.log(tmp27)
tmp29 = tmp25 * tmp28
tmp30 = tmp6 - tmp25
tmp31 = tmp6 - tmp26
tmp32 = triton_helpers.maximum(tmp31, tmp2)
tmp33 = tl_math.log(tmp32)
tmp34 = tmp30 * tmp33
tmp35 = tmp29 + tmp34
tmp36 = tmp24 + tmp35
tmp39 = triton_helpers.maximum(tmp38, tmp2)
tmp40 = tl_math.log(tmp39)
tmp41 = tmp37 * tmp40
tmp42 = tmp6 - tmp37
tmp43 = tmp6 - tmp38
tmp44 = triton_helpers.maximum(tmp43, tmp2)
tmp45 = tl_math.log(tmp44)
tmp46 = tmp42 * tmp45
tmp47 = tmp41 + tmp46
tmp48 = tmp36 + tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 64.0
tmp53 = tmp51 / tmp52
tmp54 = -tmp53
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp54, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [clamp, log, mul, sub, sub_1, clamp_1, log_1, mul_1, add, sum_1, mean, neg], Original ATen: [aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sum, aten.mean, aten.neg]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_log_mean_mul_neg_rsub_sum_0.run(buf2, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BCELoss(nn.Module):
def __init__(self):
        super().__init__()
def forward(self, input, target):
return -torch.mean(torch.sum(target * torch.log(torch.clamp(input,
min=1e-10)) + (1 - target) * torch.log(torch.clamp(1 - input,
min=1e-10)), 1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_log_mean_mul_neg_rsub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp37 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp38 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = 1e-10
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tl_math.log(tmp3)
tmp5 = tmp0 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp0
tmp8 = tmp6 - tmp1
tmp9 = triton_helpers.maximum(tmp8, tmp2)
tmp10 = tl_math.log(tmp9)
tmp11 = tmp7 * tmp10
tmp12 = tmp5 + tmp11
tmp15 = triton_helpers.maximum(tmp14, tmp2)
tmp16 = tl_math.log(tmp15)
tmp17 = tmp13 * tmp16
tmp18 = tmp6 - tmp13
tmp19 = tmp6 - tmp14
tmp20 = triton_helpers.maximum(tmp19, tmp2)
tmp21 = tl_math.log(tmp20)
tmp22 = tmp18 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = tmp12 + tmp23
tmp27 = triton_helpers.maximum(tmp26, tmp2)
tmp28 = tl_math.log(tmp27)
tmp29 = tmp25 * tmp28
tmp30 = tmp6 - tmp25
tmp31 = tmp6 - tmp26
tmp32 = triton_helpers.maximum(tmp31, tmp2)
tmp33 = tl_math.log(tmp32)
tmp34 = tmp30 * tmp33
tmp35 = tmp29 + tmp34
tmp36 = tmp24 + tmp35
tmp39 = triton_helpers.maximum(tmp38, tmp2)
tmp40 = tl_math.log(tmp39)
tmp41 = tmp37 * tmp40
tmp42 = tmp6 - tmp37
tmp43 = tmp6 - tmp38
tmp44 = triton_helpers.maximum(tmp43, tmp2)
tmp45 = tl_math.log(tmp44)
tmp46 = tmp42 * tmp45
tmp47 = tmp41 + tmp46
tmp48 = tmp36 + tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 64.0
tmp53 = tmp51 / tmp52
tmp54 = -tmp53
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_clamp_log_mean_mul_neg_rsub_sum_0[grid(1)](buf2,
arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class BCELossNew(nn.Module):
def __init__(self):
        super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| klovbe/UnsupervisedDeepLearning-Pytorch | BCELoss | false | 7,046 | [
"MIT"
] | 1 | 35e8e49cd4024179db173f3dab2e6d1a5d037d35 | https://github.com/klovbe/UnsupervisedDeepLearning-Pytorch/tree/35e8e49cd4024179db173f3dab2e6d1a5d037d35 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self):
        super().__init__()
def forward(self, input, target):
return -torch.mean(torch.sum(target * torch.log(torch.clamp(input,
min=1e-10)) + (1 - target) * torch.log(torch.clamp(1 - input,
min=1e-10)), 1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MaxMarginCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dr/cdrckjisfb4cbpsf3e426dedfjbm6nkrtn4zum2xlgv5uihfw464.py
# Topologically Sorted Source Nodes: [add, sub, clamp, visual_rank_loss, add_1, sub_1, clamp_1, lang_rank_loss, add_2, sum_1, loss], Original ATen: [aten.add, aten.sub, aten.clamp, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# clamp => clamp_min
# clamp_1 => clamp_min_1
# lang_rank_loss => mul_1
# loss => div
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# visual_rank_loss => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %slice_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_3, 4), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %slice_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min_1, 4), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 1), kwargs = {})
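# Eager-mode equivalent (a reference sketch for this input: N = 4, so
# batch_size = N // 3 = 1, margin = visual_rank_weight = lang_rank_weight = 4):
#   paired, visual_unpaired, lang_unpaired = cossim[:1], cossim[1:2], cossim[2:]
#   loss = (4 * (4 + visual_unpaired - paired).clamp(min=0)
#           + 4 * (4 + lang_unpaired - paired).clamp(min=0)).sum() / 1
# Note the (1, ...) visual term broadcasts against the (2, ...) lang term, so
# it is effectively counted twice in the sum, as the kernel's r0 = rindex % 64
# indexing shows.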
triton_per_fused_add_clamp_div_mul_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_div_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_div_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_div_mul_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + (64 + r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (128 + r2), None)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tmp6 * tmp1
tmp9 = tmp8 + tmp1
tmp10 = tmp9 - tmp3
tmp11 = triton_helpers.maximum(tmp10, tmp5)
tmp12 = tmp11 * tmp1
tmp13 = tmp7 + tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = 1.0
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, sub, clamp, visual_rank_loss, add_1, sub_1, clamp_1, lang_rank_loss, add_2, sum_1, loss], Original ATen: [aten.add, aten.sub, aten.clamp, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_div_mul_sub_sum_0.run(buf1, arg0_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaxMarginCriterion(nn.Module):
def __init__(self, visual_rank_weight, lang_rank_weight, margin):
super(MaxMarginCriterion, self).__init__()
self.visual_rank = visual_rank_weight > 0
self.lang_rank = lang_rank_weight > 0
self.visual_rank_weight = visual_rank_weight
self.lang_rank_weight = lang_rank_weight
self.margin = margin
def forward(self, cossim):
N = cossim.size(0)
batch_size = 0
if self.visual_rank and not self.lang_rank:
batch_size = N // 2
assert isinstance(batch_size, int)
paired = cossim[:batch_size]
unpaired = cossim[batch_size:]
visual_rank_loss = self.visual_rank_weight * torch.clamp(self.
margin + unpaired - paired, min=0)
lang_rank_loss = 0.0
elif not self.visual_rank and self.lang_rank:
batch_size = N // 2
assert isinstance(batch_size, int)
            paired = cossim[:batch_size]
unpaired = cossim[batch_size:]
lang_rank_loss = self.lang_rank_weight * torch.clamp(self.
margin + unpaired - paired, min=0)
visual_rank_loss = 0.0
elif self.visual_rank and self.lang_rank:
batch_size = N // 3
assert isinstance(batch_size, int)
paired = cossim[:batch_size]
visual_unpaired = cossim[batch_size:batch_size * 2]
lang_unpaired = cossim[batch_size * 2:]
visual_rank_loss = self.visual_rank_weight * torch.clamp(self.
margin + visual_unpaired - paired, 0)
lang_rank_loss = self.lang_rank_weight * torch.clamp(self.
margin + lang_unpaired - paired, 0)
else:
raise NotImplementedError
loss = (visual_rank_loss + lang_rank_loss).sum() / batch_size
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'visual_rank_weight': 4, 'lang_rank_weight': 4, 'margin': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_mul_sub_sum_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + (64 + r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (128 + r2), None)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tmp6 * tmp1
tmp9 = tmp8 + tmp1
tmp10 = tmp9 - tmp3
tmp11 = triton_helpers.maximum(tmp10, tmp5)
tmp12 = tmp11 * tmp1
tmp13 = tmp7 + tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = 1.0
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_div_mul_sub_sum_0[grid(1)](buf1, arg0_1,
1, 128, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class MaxMarginCriterionNew(nn.Module):
def __init__(self, visual_rank_weight, lang_rank_weight, margin):
super(MaxMarginCriterionNew, self).__init__()
self.visual_rank = visual_rank_weight > 0
self.lang_rank = lang_rank_weight > 0
self.visual_rank_weight = visual_rank_weight
self.lang_rank_weight = lang_rank_weight
self.margin = margin
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| kmario23/MAttNet | MaxMarginCriterion | false | 7,047 | [
"MIT"
] | 1 | 0d66321eb5dc9c8523a5ebf45f608b0672b051ab | https://github.com/kmario23/MAttNet/tree/0d66321eb5dc9c8523a5ebf45f608b0672b051ab | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, visual_rank_weight, lang_rank_weight, margin):
super().__init__()
self.visual_rank = visual_rank_weight > 0
self.lang_rank = lang_rank_weight > 0
self.visual_rank_weight = visual_rank_weight
self.lang_rank_weight = lang_rank_weight
self.margin = margin
def forward(self, cossim):
N = cossim.size(0)
batch_size = 0
if self.visual_rank and not self.lang_rank:
batch_size = N // 2
assert isinstance(batch_size, int)
paired = cossim[:batch_size]
unpaired = cossim[batch_size:]
visual_rank_loss = self.visual_rank_weight * torch.clamp(self.
margin + unpaired - paired, min=0)
lang_rank_loss = 0.0
elif not self.visual_rank and self.lang_rank:
batch_size = N // 2
assert isinstance(batch_size, int)
            paired = cossim[:batch_size]
unpaired = cossim[batch_size:]
lang_rank_loss = self.lang_rank_weight * torch.clamp(self.
margin + unpaired - paired, min=0)
visual_rank_loss = 0.0
elif self.visual_rank and self.lang_rank:
batch_size = N // 3
assert isinstance(batch_size, int)
paired = cossim[:batch_size]
visual_unpaired = cossim[batch_size:batch_size * 2]
lang_unpaired = cossim[batch_size * 2:]
visual_rank_loss = self.visual_rank_weight * torch.clamp(self.
margin + visual_unpaired - paired, 0)
lang_rank_loss = self.lang_rank_weight * torch.clamp(self.
margin + lang_unpaired - paired, 0)
else:
raise NotImplementedError
loss = (visual_rank_loss + lang_rank_loss).sum() / batch_size
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
TanhTransform | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2u/c2uiw7ahbyyn2tpdoqsbcj7uovvdxeffmrw65tlyupfobji5idrg.py
# Topologically Sorted Source Nodes: [tanh, add, mul, out], Original ATen: [aten.tanh, aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# out => add_1
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
triton_poi_fused_add_mul_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = 0.0
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, add, mul, out], Original ATen: [aten.tanh, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def arctanh(x, eps=1e-06):
"""
Calculates the inverse hyperbolic tangent.
"""
x *= 1.0 - eps
return torch.log((1 + x) / (1 - x)) * 0.5
class TanhTransform(nn.Module):
"""
Computes the tanh transform used to
remove box constraints from C&W paper
    NOTE: This reparameterization trick is
    highly numerically unstable even for small-ish
    values, so it should really only be used
    for inputs that are bounded above or below
    by relatively small values
Args:
xmin (float or torch.Tensor):
the lower bound for input values
should either be a float or broadcastable
with the input tensor where each element
in the tensor corresponds to the lower
bound of an input feature
xmax (float or torch.Tensor):
            the upper bound for input values
should either be a float or broadcastable
with the input tensor where each element
in the tensor corresponds to the upper
bound of an input feature
"""
def __init__(self, xmin=0, xmax=1):
super(TanhTransform, self).__init__()
delta = xmax - xmin
self.delta_2 = delta / 2
self.xmax = xmax
self.xmin = xmin
def forward(self, x):
out = (x.tanh() + 1) * self.delta_2 + self.xmin
return out
def invert_forward(self, x):
z = (x - self.xmin) / self.delta_2 - 1
return arctanh(z)
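# Minimal round-trip sketch (not part of the original module; the _demo
# name is illustrative only). Inputs are clamped so arctanh stays in its
# well-conditioned range:
def _demo_tanh_transform():
    t = TanhTransform(xmin=0.0, xmax=1.0)
    x = torch.randn(8).clamp(-3.0, 3.0)
    y = t(x)  # every entry of y lies in the open interval (0, 1)
    # clone() because arctanh scales its argument in place
    x_rec = t.invert_forward(y.clone())
    assert torch.allclose(x, x_rec, atol=1e-3)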
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
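# Elementwise kernel computing (tanh(x) + 1) * 0.5 + 0.0, i.e. the forward
# transform specialized to xmin=0, xmax=1 (so delta_2 = 0.5).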
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = 0.0
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def arctanh(x, eps=1e-06):
"""
Calculates the inverse hyperbolic tangent.
"""
x *= 1.0 - eps
return torch.log((1 + x) / (1 - x)) * 0.5
class TanhTransformNew(nn.Module):
"""
Computes the tanh transform used to
remove box constraints from C&W paper
    NOTE: This reparameterization trick is
    highly numerically unstable even for small-ish
    values, so it should really only be used
    for inputs that are bounded above or below
    by relatively small values
Args:
xmin (float or torch.Tensor):
the lower bound for input values
should either be a float or broadcastable
with the input tensor where each element
in the tensor corresponds to the lower
bound of an input feature
xmax (float or torch.Tensor):
            the upper bound for input values
should either be a float or broadcastable
with the input tensor where each element
in the tensor corresponds to the upper
bound of an input feature
"""
def __init__(self, xmin=0, xmax=1):
super(TanhTransformNew, self).__init__()
delta = xmax - xmin
self.delta_2 = delta / 2
self.xmax = xmax
self.xmin = xmin
def invert_forward(self, x):
z = (x - self.xmin) / self.delta_2 - 1
return arctanh(z)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| knalbant/oppel | TanhTransform | false | 7,048 | [
"MIT"
] | 1 | 03f840565ef64587ddb7a8b4145d8df7fb0279a3 | https://github.com/knalbant/oppel/tree/03f840565ef64587ddb7a8b4145d8df7fb0279a3 | import torch
import torch.nn as nn
def arctanh(x, eps=1e-06):
"""
Calculates the inverse hyperbolic tangent.
"""
x *= 1.0 - eps
return torch.log((1 + x) / (1 - x)) * 0.5
class Model(nn.Module):
"""
Computes the tanh transform used to
remove box constraints from C&W paper
NOTE: This reparamterization trick is
highly numerically unstable even for small-ish
values so should really only be used
for inputs that are bounded above or below
by relatively small values
Args:
xmin (float or torch.Tensor):
the lower bound for input values
should either be a float or broadcastable
with the input tensor where each element
in the tensor corresponds to the lower
bound of an input feature
xmax (float or torch.Tensor):
            the upper bound for input values
should either be a float or broadcastable
with the input tensor where each element
in the tensor corresponds to the upper
bound of an input feature
"""
def __init__(self, xmin=0, xmax=1):
super().__init__()
delta = xmax - xmin
self.delta_2 = delta / 2
self.xmax = xmax
self.xmin = xmin
def forward(self, x):
out = (x.tanh() + 1) * self.delta_2 + self.xmin
return out
def invert_forward(self, x):
z = (x - self.xmin) / self.delta_2 - 1
return arctanh(z)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BCEFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ha/chabyh2koocahqamiieikzhmqd6uvzkkiiqoydh5ojfekilhswkl.py
# Topologically Sorted Source Nodes: [mul, probas, sub, pow_1, mul_1, bce_loss, mul_2, sub_1, pow_2, mul_3, mul_4, loss, loss_1], Original ATen: [aten.mul, aten.sigmoid, aten.rsub, aten.pow, aten.binary_cross_entropy_with_logits, aten.add, aten.mean]
# Source node to ATen node mapping:
# bce_loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# loss => add
# loss_1 => mean
# mul => mul_1
# mul_1 => mul_2
# mul_2 => mul_3
# mul_3 => mul_4
# mul_4 => mul_5
# pow_1 => pow_1
# pow_2 => pow_2
# probas => sigmoid
# sub => sub_3
# sub_1 => sub_4
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.25), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %pow_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %sub_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %pow_2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %sub_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp5 - tmp0
tmp10 = tmp9 * tmp3
tmp11 = 0.0
tmp12 = triton_helpers.minimum(tmp11, tmp3)
tmp13 = tl_math.abs(tmp3)
tmp14 = -tmp13
tmp15 = tl_math.exp(tmp14)
tmp16 = libdevice.log1p(tmp15)
tmp17 = tmp12 - tmp16
tmp18 = tmp10 - tmp17
tmp19 = tmp8 * tmp18
tmp20 = tmp4 * tmp4
tmp21 = tmp9 * tmp20
tmp22 = tmp21 * tmp18
tmp23 = tmp19 + tmp22
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, probas, sub, pow_1, mul_1, bce_loss, mul_2, sub_1, pow_2, mul_3, mul_4, loss, loss_1], Original ATen: [aten.mul, aten.sigmoid, aten.rsub, aten.pow, aten.binary_cross_entropy_with_logits, aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BCEFocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, preds, targets):
bce_loss = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
probas = torch.sigmoid(preds)
loss = targets * self.alpha * (1.0 - probas
) ** self.gamma * bce_loss + (1.0 - targets
) * probas ** self.gamma * bce_loss
loss = loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
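# The kernel below evaluates the numerically stable BCE-with-logits form
#   bce(z, t) = (1 - t) * z - (min(0, z) - log1p(exp(-|z|)))
# (equivalent to max(z, 0) - t * z + log(1 + exp(-|z|))), then applies the
# focal weights alpha * t * (1 - p)^2 and (1 - t) * p^2 with p = sigmoid(z)
# and means over all 256 elements.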
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp5 - tmp0
tmp10 = tmp9 * tmp3
tmp11 = 0.0
tmp12 = triton_helpers.minimum(tmp11, tmp3)
tmp13 = tl_math.abs(tmp3)
tmp14 = -tmp13
tmp15 = tl_math.exp(tmp14)
tmp16 = libdevice.log1p(tmp15)
tmp17 = tmp12 - tmp16
tmp18 = tmp10 - tmp17
tmp19 = tmp8 * tmp18
tmp20 = tmp4 * tmp4
tmp21 = tmp9 * tmp20
tmp22 = tmp21 * tmp18
tmp23 = tmp19 + tmp22
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[
grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCEFocalLossNew(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| koukyo1994/riadd-competition | BCEFocalLoss | false | 7,049 | [
"MIT"
] | 1 | 0e399305aef21d40125cadccee55be1f0b310216 | https://github.com/koukyo1994/riadd-competition/tree/0e399305aef21d40125cadccee55be1f0b310216 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, preds, targets):
bce_loss = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
probas = torch.sigmoid(preds)
loss = targets * self.alpha * (1.0 - probas
) ** self.gamma * bce_loss + (1.0 - targets
) * probas ** self.gamma * bce_loss
loss = loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LSEPLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cz/ccz3mqay2w4lq4ij6e3tbcm2zs2h4dw3raeu4rxxshf35zxkmmzh.py
# Topologically Sorted Source Nodes: [max_1, differences_2, exp, exps, sum_1], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum]
# Source node to ATen node mapping:
# differences_2 => sub_1
# exp => exp
# exps => mul
# max_1 => max_1
# sum_1 => sum_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1, True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
triton_per_fused_exp_max_mul_sub_sum_0 = async_compile.triton('triton_per_fused_exp_max_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_exp_max_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 4
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((64*x0) + (r1 % 64)), None)
tmp1 = tl.load(in_ptr0 + ((16*(r1 // 64)) + (64*x0) + (r1 % 16)), None)
tmp8 = tl.load(in_ptr1 + ((64*x0) + (r1 % 64)), None)
tmp9 = tl.load(in_ptr1 + ((16*(r1 // 64)) + (64*x0) + (r1 % 16)), None)
tmp2 = tmp0 - tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp10 = tmp8 < tmp9
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp7 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tl.store(out_ptr0 + (x0), tmp5, None)
tl.store(out_ptr1 + (x0), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/o7/co755tglhy3eavlx5pegfdmy36akzbmekxaszx5knjrxhhqk72o7.py
# Topologically Sorted Source Nodes: [neg, exp_1, add, log, lsep, mean], Original ATen: [aten.neg, aten.exp, aten.add, aten.log, aten.mean]
# Source node to ATen node mapping:
# add => add
# exp_1 => exp_1
# log => log
# lsep => add_1
# mean => mean
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, %sum_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %log), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_exp_log_mean_neg_1 = async_compile.triton('triton_per_fused_add_exp_log_mean_neg_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_log_mean_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_exp_log_mean_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 4)
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp1 = -tmp0
tmp2 = tl_math.exp(tmp1)
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp0 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 16.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [max_1, differences_2, exp, exps, sum_1], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_exp_max_mul_sub_sum_0.run(arg0_1, arg1_1, buf0, buf2, 4, 256, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [neg, exp_1, add, log, lsep, mean], Original ATen: [aten.neg, aten.exp, aten.add, aten.log, aten.mean]
triton_per_fused_add_exp_log_mean_neg_1.run(buf4, buf0, buf2, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def lsep_loss_stable(input, target, average=True):
n = input.size(0)
differences = input.unsqueeze(1) - input.unsqueeze(2)
where_lower = (target.unsqueeze(1) < target.unsqueeze(2)).float()
differences = differences.view(n, -1)
where_lower = where_lower.view(n, -1)
max_difference, _index = torch.max(differences, dim=1, keepdim=True)
differences = differences - max_difference
exps = differences.exp() * where_lower
lsep = max_difference + torch.log(torch.exp(-max_difference) + exps.sum(-1)
)
if average:
return lsep.mean()
else:
return lsep
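# Stability note: log(1 + sum_i exp(d_i)) is evaluated through the shifted
# identity m + log(exp(-m) + sum_i exp(d_i - m)) with m = max_i d_i, so no
# exponential can overflow; the where_lower mask keeps only pairs whose
# target is strictly lower.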
class LSEPLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, preds, targets):
return lsep_loss_stable(preds, targets)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
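# First kernel: one program per row computes m = max over the 256 flattened
# pairwise differences and sum(exp(d - m) * [target_i < target_j]) in a
# single pass; the second kernel combines them as m + log(exp(-m) + sum) and
# averages over 16 entries, which arise from broadcasting the (4, 1) row
# maxima against the (4,) row sums.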
@triton.jit
def triton_per_fused_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (64 * x0 + r1 % 64), None)
tmp1 = tl.load(in_ptr0 + (16 * (r1 // 64) + 64 * x0 + r1 % 16), None)
tmp8 = tl.load(in_ptr1 + (64 * x0 + r1 % 64), None)
tmp9 = tl.load(in_ptr1 + (16 * (r1 // 64) + 64 * x0 + r1 % 16), None)
tmp2 = tmp0 - tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp10 = tmp8 < tmp9
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp7 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tl.store(out_ptr0 + x0, tmp5, None)
tl.store(out_ptr1 + x0, tmp15, None)
@triton.jit
def triton_per_fused_add_exp_log_mean_neg_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp1 = -tmp0
tmp2 = tl_math.exp(tmp1)
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp0 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 16.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_exp_max_mul_sub_sum_0[grid(4)](arg0_1, arg1_1,
buf0, buf2, 4, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_exp_log_mean_neg_1[grid(1)](buf4, buf0, buf2,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
def lsep_loss_stable(input, target, average=True):
n = input.size(0)
differences = input.unsqueeze(1) - input.unsqueeze(2)
where_lower = (target.unsqueeze(1) < target.unsqueeze(2)).float()
differences = differences.view(n, -1)
where_lower = where_lower.view(n, -1)
max_difference, _index = torch.max(differences, dim=1, keepdim=True)
differences = differences - max_difference
exps = differences.exp() * where_lower
lsep = max_difference + torch.log(torch.exp(-max_difference) + exps.sum(-1)
)
if average:
return lsep.mean()
else:
return lsep
class LSEPLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| koukyo1994/riadd-competition | LSEPLoss | false | 7,050 | [
"MIT"
] | 1 | 0e399305aef21d40125cadccee55be1f0b310216 | https://github.com/koukyo1994/riadd-competition/tree/0e399305aef21d40125cadccee55be1f0b310216 | import torch
import torch.nn as nn
def lsep_loss_stable(input, target, average=True):
n = input.size(0)
differences = input.unsqueeze(1) - input.unsqueeze(2)
where_lower = (target.unsqueeze(1) < target.unsqueeze(2)).float()
differences = differences.view(n, -1)
where_lower = where_lower.view(n, -1)
max_difference, _index = torch.max(differences, dim=1, keepdim=True)
differences = differences - max_difference
exps = differences.exp() * where_lower
lsep = max_difference + torch.log(torch.exp(-max_difference) + exps.sum(-1)
)
if average:
return lsep.mean()
else:
return lsep
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, preds, targets):
return lsep_loss_stable(preds, targets)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SwaVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
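# The kernels below unroll the Sinkhorn-Knopp normalization used for the
# SwaV assignment step: Q is initialized as exp(20.0 * scores) (20.0 is
# presumably 1 / epsilon for epsilon = 0.05), then column and row sums are
# alternately divided out, with each pass rescaled by 0.25 for the 4x4
# assignment matrix.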
# kernel path: runs/run_shard_4/inductor_cache/3g/c3gd5ipjfbufzu6hvujxufg6z3emufd62cvcqjyy3muqew2xvzbd.py
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pb/cpb5rsib7fg2llnv7dg2enzvdhrqqrlufmxvryadr6zckhtmyysm.py
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_2, [1], True), kwargs = {})
triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ay/cayipr5tzejzfvzl7ba3w7cqhf2axor2ag2cqam22ambv3zsqfqn.py
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_3 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_6, [0], True), kwargs = {})
triton_poi_fused_sum_2 = async_compile.triton('triton_poi_fused_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gf/cgfazley3l5rdd6ypj2skdwbxoltt6cdo3d3mpt4z3kqywdlsil5.py
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_1 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_10, [1], True), kwargs = {})
triton_poi_fused_sum_3 = async_compile.triton('triton_poi_fused_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/im/cimyq4ncn2lt7wfmnkrejd4t7hctagm26a6wsjpob7y2e6gdebuy.py
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_7 => div_7
# Graph fragment:
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_12, 4), kwargs = {})
triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/f4/cf4q6mh4hv4abtcbirw4vkce7au36jgqitbjdciddckwumhdqyiq.py
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_9 => div_9
# Graph fragment:
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_16, 4), kwargs = {})
triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
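# Reader's note: rather than reading a precomputed sum buffer,
# triton_poi_fused_div_5 re-loads the four entries of each row (4*x1 + 0..3)
# and forms the row sum in-register, fusing Q_9 = (Q / sum_of_rows) / 4 into a
# single pass. Equivalent eager form (a sketch):
def _ref_row_normalize(Q):
    return Q / Q.sum(dim=1, keepdim=True) * 0.25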
# kernel path: runs/run_shard_4/inductor_cache/zl/czlhaiakbynkvvwkmvsahwmxzw7xdccr5ho7bxsxvn4m725ewzpe.py
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_11 => div_11
# Graph fragment:
# %div_11 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_20, 4), kwargs = {})
triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
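# Reader's note: triton_poi_fused_div_6 is the column-wise mirror of the
# kernel above -- it gathers the stride-4 entries x0, 4+x0, 8+x0, 12+x0 to
# build each column sum in-register. Sketch:
def _ref_col_normalize(Q):
    return Q / Q.sum(dim=0, keepdim=True) * 0.25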
# kernel path: runs/run_shard_4/inductor_cache/zq/czqcd5dzpu5vwblosfp6mrfhziw4van5syy52ogdznuvysvejglt.py
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# Q_14 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_26, 4), kwargs = {})
triton_poi_fused_mul_7 = async_compile.triton('triton_poi_fused_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
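# Reader's note: triton_poi_fused_mul_7 fuses the last row normalization with
# the final Q_14 = Q * 4 from the graph; the generated code keeps both the
# * 0.25 and the * 4.0 literally rather than cancelling them. Sketch:
def _ref_final_scale(Q):
    return Q / Q.sum(dim=1, keepdim=True) * 0.25 * 4.0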
# kernel path: runs/run_shard_4/inductor_cache/2o/c2oziyswbkb6ooscmnox5xojo4drqahfdvmtgtzz5unwazisafb7.py
# Topologically Sorted Source Nodes: [sum_Q_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q_1 => sum_22
# Graph fragment:
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute_32,), kwargs = {})
# %mul_tensor_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, 1), kwargs = {})
# %amax_default_12 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_12, [1], True), kwargs = {})
# %sub_tensor_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_12, %amax_default_12), kwargs = {})
# %div_tensor_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_12, 0.1), kwargs = {})
# %mul_tensor_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_26, 1), kwargs = {})
# %amax_default_5 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_5, [1], True), kwargs = {})
# %sub_tensor_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_5, %amax_default_5), kwargs = {})
# %div_tensor_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_5, 0.1), kwargs = {})
triton_per_fused_sum_8 = async_compile.triton('triton_per_fused_sum_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (16 + r0), None)
tmp9 = tl.load(in_ptr0 + (16 + (4*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (17 + (4*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (18 + (4*r2)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (19 + (4*r2)), None, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
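# Reader's note: triton_per_fused_sum_8 does double duty for the slice at
# offset 16: the persistent reduction produces the scalar sum_Q, while the
# pointwise tail emits the numerically stable temperature-scaled logits
# (x - rowmax(x)) / 0.1 into two buffers for downstream log_softmax use (the
# * 1.0 is a no-op kept from the graph's mul-by-1). Sketch, assuming a 4x4
# slice and temperature 0.1:
def _ref_sum_q_and_scaled_logits(x):
    sum_q = torch.exp(20.0 * x).sum()
    scaled = (x - x.max(dim=1, keepdim=True).values) * 10.0  # 10.0 == 1 / 0.1
    return sum_q, scaled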
# kernel path: runs/run_shard_4/inductor_cache/gx/cgxhyecrtx7u6qjrmuu7kg45b4dlhwl7jr6ztpezyzreqxtwfd5r.py
# Topologically Sorted Source Nodes: [sum_of_rows_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_3 => sum_23
# Graph fragment:
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_34, [1], True), kwargs = {})
triton_poi_fused_sum_9 = async_compile.triton('triton_poi_fused_sum_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
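# Reader's note: triton_poi_fused_sum_9 produces the first per-index sums for
# this slice by reducing exp(20 * x) / sum_Q over the stride-4 axis (loads at
# 16/20/24/28 + x0). Sketch (orientation hedged -- the graph reduces dim 1 of
# a permuted view):
def _ref_first_sums(x, sum_q):
    Q = torch.exp(20.0 * x) / sum_q
    return Q.sum(dim=0, keepdim=True)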
# kernel path: runs/run_shard_4/inductor_cache/gg/cggk3re7n2vchxwvrujwqfrtrkzz53k2ysszjkctmcorctbnrwjn.py
# Topologically Sorted Source Nodes: [sum_17], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_17 => sum_24
# Graph fragment:
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_38, [0], True), kwargs = {})
triton_poi_fused_sum_10 = async_compile.triton('triton_poi_fused_sum_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (17 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (18 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (19 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
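# Reader's note: triton_poi_fused_sum_10 applies one stored normalization
# round (in_ptr2, broadcast per column) and then reduces along the other axis,
# yielding the sums for the next round. Sketch:
def _ref_second_sums(x, sum_q, prev_sums):
    Q = torch.exp(20.0 * x) / sum_q
    Q = Q / prev_sums * 0.25
    return Q.sum(dim=1, keepdim=True)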
# kernel path: runs/run_shard_4/inductor_cache/ys/cysm4kiitlxgr4z6f4ub5rd2focy27gqcmg2rwqbgxxtuy6mldou.py
# Topologically Sorted Source Nodes: [sum_of_rows_4], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_4 => sum_25
# Graph fragment:
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_42, [1], True), kwargs = {})
triton_poi_fused_sum_11 = async_compile.triton('triton_poi_fused_sum_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
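# Reader's note: triton_poi_fused_sum_11 is the offset-16 instance of the
# doubly-normalized reduction sketched in _ref_doubly_normalized_sums above;
# only the base offset into in_ptr0 differs.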
# kernel path: runs/run_shard_4/inductor_cache/d3/cd3gefgcdmhdfyughxzotbsun6fbsyeopog67ravaw3wghfygqhr.py
# Topologically Sorted Source Nodes: [Q_22], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_22 => div_29
# Graph fragment:
# %mul_tensor_27 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, 1), kwargs = {})
# %amax_default_27 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_27, [1], True), kwargs = {})
# %sub_tensor_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_27, %amax_default_27), kwargs = {})
# %div_tensor_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_27, 0.1), kwargs = {})
# %div_29 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_44, 4), kwargs = {})
triton_poi_fused_div_12 = async_compile.triton('triton_poi_fused_div_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (18 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (19 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (0))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp32, xmask)
''', device_str='cuda')
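# Reader's note: triton_poi_fused_div_12 fuses two unrelated consumers of the
# same slice: out_ptr0 receives the stable temperature-scaled logits and
# out_ptr1 the triply normalized Q_22. Sketch with illustrative names:
def _ref_scaled_logits_and_q(x, sum_q, col_sums_1, row_sums, col_sums_2):
    scaled = (x - x.max(dim=1, keepdim=True).values) * 10.0
    Q = torch.exp(20.0 * x) / sum_q
    Q = Q / col_sums_1 * 0.25 / row_sums * 0.25 / col_sums_2 * 0.25
    return scaled, Q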
# kernel path: runs/run_shard_4/inductor_cache/tt/cttg2lnxpoxay4r27e6yymultf5wx23hbwstvoohxvcwcvlv7lne.py
# Topologically Sorted Source Nodes: [sum_Q_2], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q_2 => sum_43
# Graph fragment:
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute_64,), kwargs = {})
# %mul_tensor_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_27, 1), kwargs = {})
# %amax_default_4 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_4, [1], True), kwargs = {})
# %sub_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_4, %amax_default_4), kwargs = {})
# %div_tensor_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_4, 0.1), kwargs = {})
triton_per_fused_sum_13 = async_compile.triton('triton_per_fused_sum_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_13(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (32 + r0), None)
tmp9 = tl.load(in_ptr0 + (32 + (4*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (33 + (4*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (34 + (4*r2)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (35 + (4*r2)), None, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
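# Reader's note: triton_per_fused_sum_13 is the offset-32 variant of
# triton_per_fused_sum_8, emitting one scaled-logits buffer instead of two;
# the kernels that follow (triton_poi_fused_sum_14/15/16) repeat the
# per-slice reductions at this offset.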
# kernel path: runs/run_shard_4/inductor_cache/rs/crsq6qrm3t5shscjihgidrwsneusfut3iqrrk7iex7ld6v54lro4.py
# Topologically Sorted Source Nodes: [sum_of_rows_6], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_6 => sum_44
# Graph fragment:
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_66, [1], True), kwargs = {})
triton_poi_fused_sum_14 = async_compile.triton('triton_poi_fused_sum_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p5/cp5kffayhihgup7sw7fmivxuc6kqm5s27wsgpzsyvv6f5c7cnvv2.py
# Topologically Sorted Source Nodes: [sum_31], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_31 => sum_45
# Graph fragment:
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_70, [0], True), kwargs = {})
triton_poi_fused_sum_15 = async_compile.triton('triton_poi_fused_sum_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (33 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (34 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (35 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ql/cql2xen4uqkei7lbsi5zq5b6xq5fzb7jd63dyiyjcisvyjkosvfv.py
# Topologically Sorted Source Nodes: [sum_of_rows_7], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_7 => sum_46
# Graph fragment:
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_74, [1], True), kwargs = {})
triton_poi_fused_sum_16 = async_compile.triton('triton_poi_fused_sum_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xn/cxnxquhhhxb6dojpjiwgi5c62c3wmitjezayzfjio7k2jv7vqpsd.py
# Topologically Sorted Source Nodes: [Q_37], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_37 => div_51
# Graph fragment:
# %mul_tensor_26 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, 1), kwargs = {})
# %amax_default_26 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_26, [1], True), kwargs = {})
# %sub_tensor_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_26, %amax_default_26), kwargs = {})
# %div_tensor_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_26, 0.1), kwargs = {})
# %mul_tensor_19 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, 1), kwargs = {})
# %amax_default_19 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_19, [1], True), kwargs = {})
# %sub_tensor_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_19, %amax_default_19), kwargs = {})
# %div_tensor_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_19, 0.1), kwargs = {})
# %div_51 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_76, 4), kwargs = {})
triton_poi_fused_div_17 = async_compile.triton('triton_poi_fused_div_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (34 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (35 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (0))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp32, xmask)
''', device_str='cuda')
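# Reader's note: triton_poi_fused_div_17 matches triton_poi_fused_div_12 but
# stores the scaled logits twice (out_ptr0 and out_ptr1), apparently because
# two downstream log_softmax consumers read them.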
# kernel path: runs/run_shard_4/inductor_cache/ua/cuanikw4qrep32zrex3uz6h4wr62oltdxm3gxoi2zv2qybounuk4.py
# Topologically Sorted Source Nodes: [sum_Q_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q_3 => sum_64
# Graph fragment:
# %sum_64 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute_96,), kwargs = {})
triton_per_fused_sum_18 = async_compile.triton('triton_per_fused_sum_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_18(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (48 + r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
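# Reader's note: triton_per_fused_sum_18 is the bare reduction of the
# pipeline -- the scalar sum_Q over the 16 elements of the slice at offset
# 48, with no fused pointwise tail. The three kernels that follow
# (triton_poi_fused_sum_19/20/21) repeat the earlier per-slice reductions at
# this offset. Sketch:
def _ref_sum_q(x):
    return torch.exp(20.0 * x).sum()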
# kernel path: runs/run_shard_4/inductor_cache/xx/cxxmu5ippz2osge53d6ltlbfzj363ko2cdlhkuv422dj24x4atll.py
# Topologically Sorted Source Nodes: [sum_of_rows_9], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_9 => sum_65
# Graph fragment:
# %sum_65 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_98, [1], True), kwargs = {})
triton_poi_fused_sum_19 = async_compile.triton('triton_poi_fused_sum_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iv/civ4enldjkrehizi4iifjk4xmuvxwl4hfaywx27qia42wl4csidk.py
# Topologically Sorted Source Nodes: [sum_45], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_45 => sum_66
# Graph fragment:
# %sum_66 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_102, [0], True), kwargs = {})
triton_poi_fused_sum_20 = async_compile.triton('triton_poi_fused_sum_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (49 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (50 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (51 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pq/cpqg526fen4aq5cgrdhu4a2oin6acmaawz6mcehy7umciw4im4ix.py
# Topologically Sorted Source Nodes: [sum_of_rows_10], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_10 => sum_67
# Graph fragment:
# %sum_67 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_106, [1], True), kwargs = {})
triton_poi_fused_sum_21 = async_compile.triton('triton_poi_fused_sum_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/n4/cn4plb4lv6xccoz526fpmx4tom4f3ah4sfy35nahqhzq5al6uycs.py
# Topologically Sorted Source Nodes: [Q_52], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_52 => div_73
# Graph fragment:
# %mul_tensor_25 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, 1), kwargs = {})
# %amax_default_25 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_25, [1], True), kwargs = {})
# %sub_tensor_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_25, %amax_default_25), kwargs = {})
# %div_tensor_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_25, 0.1), kwargs = {})
# %mul_tensor_18 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_11, 1), kwargs = {})
# %amax_default_18 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_18, [1], True), kwargs = {})
# %sub_tensor_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_18, %amax_default_18), kwargs = {})
# %div_tensor_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_18, 0.1), kwargs = {})
# %mul_tensor_11 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_19, 1), kwargs = {})
# %amax_default_11 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_11, [1], True), kwargs = {})
# %sub_tensor_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_11, %amax_default_11), kwargs = {})
# %div_tensor_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_11, 0.1), kwargs = {})
# %div_73 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_108, 4), kwargs = {})
triton_poi_fused_div_22 = async_compile.triton('triton_poi_fused_div_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (49 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (50 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (51 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (0))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
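    # Two independent results are produced below: out_ptr0..out_ptr2 all receive
    # the numerically stable scaled logits (x - rowmax(x)) * 10.0, i.e.
    # (x - max) / 0.1, while out_ptr3 receives one further normalization step
    # of the exp(20 * x) tensor.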
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
tl.store(out_ptr3 + (x2), tmp32, xmask)
''', device_str='cuda')
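# The kernel above fuses two unrelated pieces of work on the same 4x4 slice.
# A hand-written PyTorch sketch of both halves follows; it is illustrative
# only, and Z, col_a, row and col_b are hypothetical stand-ins for
# in_ptr1..in_ptr4.
def _sketch_fused_div_22(x, Z, col_a, row, col_b):
    # (a) Stable temperature scaling: subtracting the row max before dividing
    # by T = 0.1 leaves the downstream log_softmax unchanged but avoids overflow.
    scaled = (x - x.max(dim=1, keepdim=True).values) / 0.1  # == (x - max) * 10.0
    # (b) One more normalization step of Q, mirroring tmp18..tmp32.
    Q = torch.exp(x * 20.0) / Z
    Q = Q / col_a * 0.25           # per-column divide (in_ptr2)
    Q = Q / row[:, None] * 0.25    # per-row divide (in_ptr3)
    Q = Q / col_b * 0.25           # per-column divide (in_ptr4)
    return scaled, Q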
# kernel path: runs/run_shard_4/inductor_cache/vp/cvpb5mtrsn3w4irxrcazvcy633nic3hupp3lgti6splnbwum5omh.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_24 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, 1), kwargs = {})
# %amax_default_24 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_24, [1], True), kwargs = {})
# %sub_tensor_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_24, %amax_default_24), kwargs = {})
# %div_tensor_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_24, 0.1), kwargs = {})
# %mul_tensor_17 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, 1), kwargs = {})
# %amax_default_17 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_17, [1], True), kwargs = {})
# %sub_tensor_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_17, %amax_default_17), kwargs = {})
# %div_tensor_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_17, 0.1), kwargs = {})
# %mul_tensor_10 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_20, 1), kwargs = {})
# %amax_default_10 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_10, [1], True), kwargs = {})
# %sub_tensor_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_10, %amax_default_10), kwargs = {})
# %div_tensor_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_10, 0.1), kwargs = {})
triton_poi_fused_23 = async_compile.triton('triton_poi_fused_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_23(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
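    # Stable temperature scaling of the 4x4 slice at offset 0:
    # (x - rowmax(x)) * 10.0, which equals (x - rowmax(x)) / 0.1.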
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')
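# triton_poi_fused_23 through triton_poi_fused_30 below all emit the same
# max-subtract-and-scale body; only the base offset of the 4x4 slice
# (0, 16, 32, 48) and the number of duplicated output buffers differ. A
# minimal hand-written sketch of the shared pattern (assuming a flattened
# 4x4x4 input; not part of the generated code):
def _sketch_stable_scaled_logits(flat, base, T=0.1):
    x = flat[base:base + 16].view(4, 4)
    return (x - x.max(dim=1, keepdim=True).values) / T  # == (x - max) * 10.0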
# kernel path: runs/run_shard_4/inductor_cache/eb/cebycglrjxwo5vb3xw77tabmbrro5silpti6e23n5shzjssdcz2y.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_23 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, 1), kwargs = {})
# %amax_default_23 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_23, [1], True), kwargs = {})
# %sub_tensor_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_23, %amax_default_23), kwargs = {})
# %div_tensor_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_23, 0.1), kwargs = {})
# %mul_tensor_16 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_13, 1), kwargs = {})
# %amax_default_16 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_16, [1], True), kwargs = {})
# %sub_tensor_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_16, %amax_default_16), kwargs = {})
# %div_tensor_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_16, 0.1), kwargs = {})
# %mul_tensor_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_21, 1), kwargs = {})
# %amax_default_9 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_9, [1], True), kwargs = {})
# %sub_tensor_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_9, %amax_default_9), kwargs = {})
# %div_tensor_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_9, 0.1), kwargs = {})
triton_poi_fused_24 = async_compile.triton('triton_poi_fused_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_24(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (18 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (19 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wn/cwnlobrjuzsxonoax4oxycblbqzt6yimsshy6rjao2hwqcckct7e.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_22 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, 1), kwargs = {})
# %amax_default_22 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_22, [1], True), kwargs = {})
# %sub_tensor_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_22, %amax_default_22), kwargs = {})
# %div_tensor_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_22, 0.1), kwargs = {})
# %mul_tensor_15 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_14, 1), kwargs = {})
# %amax_default_15 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_15, [1], True), kwargs = {})
# %sub_tensor_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_15, %amax_default_15), kwargs = {})
# %div_tensor_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_15, 0.1), kwargs = {})
# %mul_tensor_8 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_22, 1), kwargs = {})
# %amax_default_8 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_8, [1], True), kwargs = {})
# %sub_tensor_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_8, %amax_default_8), kwargs = {})
# %div_tensor_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_8, 0.1), kwargs = {})
triton_poi_fused_25 = async_compile.triton('triton_poi_fused_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (34 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (35 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xb/cxbkq3bw5hyime7xxe4bzxgd2nnpfykwtvk22mjef7aetgsc2zrj.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_21 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, 1), kwargs = {})
# %amax_default_21 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_21, [1], True), kwargs = {})
# %sub_tensor_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_21, %amax_default_21), kwargs = {})
# %div_tensor_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_21, 0.1), kwargs = {})
# %mul_tensor_14 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_15, 1), kwargs = {})
# %amax_default_14 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_14, [1], True), kwargs = {})
# %sub_tensor_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_14, %amax_default_14), kwargs = {})
# %div_tensor_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_14, 0.1), kwargs = {})
# %mul_tensor_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_23, 1), kwargs = {})
# %amax_default_7 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_7, [1], True), kwargs = {})
# %sub_tensor_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_7, %amax_default_7), kwargs = {})
# %div_tensor_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_7, 0.1), kwargs = {})
triton_poi_fused_26 = async_compile.triton('triton_poi_fused_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_26(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (49 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (50 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (51 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')
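# Note: triton_poi_fused_27 through triton_poi_fused_30 below are single-output
# variants of the four kernels above. The three identical stores in kernels
# 23..26 exist because each scaled-logits value feeds three separate
# div_tensor nodes in the fused graph (see the three graph fragments per
# kernel); the slices handled by kernels 27..30 are each consumed only once.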
# kernel path: runs/run_shard_4/inductor_cache/pn/cpnagcvhwbqe2rcvfgvmjcaxohah7uzhblpuio2lg6rdtez25o2c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_28, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [1], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 0.1), kwargs = {})
triton_poi_fused_27 = async_compile.triton('triton_poi_fused_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_27(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iw/ciw3zbnnlvqlllmj5a5gpcprapqy57rowpipu7cpawxbmcstu2i3.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_29, 1), kwargs = {})
# %amax_default_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_2, [1], True), kwargs = {})
# %sub_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_2, %amax_default_2), kwargs = {})
# %div_tensor_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_2, 0.1), kwargs = {})
triton_poi_fused_28 = async_compile.triton('triton_poi_fused_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_28(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (18 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (19 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/24/c24vhpuzwkiqxza7n6dhlgskxjaib3f3u2rsi724hw7jp7njwrjn.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_30, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 0.1), kwargs = {})
triton_poi_fused_29 = async_compile.triton('triton_poi_fused_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_29(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (34 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (35 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qb/cqb4xqvf3w6zhnq2pjwk5ebvybb74fgqjxi3x4voqh2rgecnzvjg.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_31, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 0.1), kwargs = {})
triton_poi_fused_30 = async_compile.triton('triton_poi_fused_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_30(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (49 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (50 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (51 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
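# The fused kernel that follows reduces everything to the final scalar loss.
# Each subloss term in the graph fragment is a soft cross-entropy between a
# target distribution (permute_31 / permute_63 / permute_95 / ...) and the
# log-softmax of a set of scaled logits; seven such terms are accumulated per
# view, divided by 7 (truediv_8, truediv_17, ...), and the per-view results
# are accumulated into `loss`. A hand-written sketch of one view, with q and
# logits as hypothetical stand-ins for the tensors named in the fragment:
def _sketch_one_view_loss(q, logits):
    import torch.nn.functional as F
    subloss = q.new_zeros(())                   # add(neg, 0.0) seeds the chain
    for x in logits:                            # the seven scaled-logit tensors
        log_p = F.log_softmax(x, dim=1)         # the exp/sum/log/sub steps
        subloss = subloss + (-(q * log_p).sum(dim=1).mean())
    return subloss / 7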
# kernel path: runs/run_shard_4/inductor_cache/xt/cxtuij5x42zgn4axuvkz6eiv2wtv6voipl2dxqeobqxx2vdpsins.py
# Topologically Sorted Source Nodes: [log_softmax, mul, sum_8, mean, neg, subloss, log_softmax_1, mul_1, sum_9, mean_1, neg_1, subloss_1, log_softmax_2, mul_2, sum_10, mean_2, neg_2, subloss_2, log_softmax_3, mul_3, sum_11, mean_3, neg_3, subloss_3, log_softmax_4, mul_4, sum_12, mean_4, neg_4, subloss_4, log_softmax_5, mul_5, sum_13, mean_5, neg_5, subloss_5, log_softmax_6, mul_6, sum_14, mean_6, neg_6, subloss_6, truediv_8, loss, log_softmax_7, mul_7, sum_22, mean_7, neg_7, subloss_7, log_softmax_8, mul_8, sum_23, mean_8, neg_8, subloss_8, log_softmax_9, mul_9, sum_24, mean_9, neg_9, subloss_9, log_softmax_10, mul_10, sum_25, mean_10, neg_10, subloss_10, log_softmax_11, mul_11, sum_26, mean_11, neg_11, subloss_11, log_softmax_12, mul_12, sum_27, mean_12, neg_12, subloss_12, log_softmax_13, mul_13, sum_28, mean_13, neg_13, subloss_13, truediv_17, loss_1, log_softmax_14, mul_14, sum_36, mean_14, neg_14, subloss_14, log_softmax_15, mul_15, sum_37, mean_15, neg_15, subloss_15, log_softmax_16, mul_16, sum_38, mean_16, neg_16, subloss_16, log_softmax_17, mul_17, sum_39, mean_17, neg_17, subloss_17, log_softmax_18, mul_18, sum_40, mean_18, neg_18, subloss_18, log_softmax_19, mul_19, sum_41, mean_19, neg_19, subloss_19, log_softmax_20, mul_20, sum_42, mean_20, neg_20, subloss_20, truediv_26, loss_2, log_softmax_21, mul_21, sum_50, mean_21, neg_21, subloss_21, log_softmax_22, mul_22, sum_51, mean_22, neg_22, subloss_22, log_softmax_23, mul_23, sum_52, mean_23, neg_23, subloss_23, log_softmax_24, mul_24, sum_53, mean_24, neg_24, subloss_24, log_softmax_25, mul_25, sum_54, mean_25, neg_25, subloss_25, log_softmax_26, mul_26, sum_55, mean_26, neg_26, subloss_26, log_softmax_27, mul_27, sum_56, mean_27, neg_27, subloss_27, truediv_35, loss_3, truediv_36], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.mean, aten.neg, aten.add, aten.div]
# Source node to ATen node mapping:
# log_softmax => exp_1, log, sub_1, sum_8
# log_softmax_1 => exp_2, log_1, sub_3, sum_10
# log_softmax_10 => exp_12, log_10, sub_21, sum_35
# log_softmax_11 => exp_13, log_11, sub_23, sum_37
# log_softmax_12 => exp_14, log_12, sub_25, sum_39
# log_softmax_13 => exp_15, log_13, sub_27, sum_41
# log_softmax_14 => exp_17, log_14, sub_29, sum_50
# log_softmax_15 => exp_18, log_15, sub_31, sum_52
# log_softmax_16 => exp_19, log_16, sub_33, sum_54
# log_softmax_17 => exp_20, log_17, sub_35, sum_56
# log_softmax_18 => exp_21, log_18, sub_37, sum_58
# log_softmax_19 => exp_22, log_19, sub_39, sum_60
# log_softmax_2 => exp_3, log_2, sub_5, sum_12
# log_softmax_20 => exp_23, log_20, sub_41, sum_62
# log_softmax_21 => exp_25, log_21, sub_43, sum_71
# log_softmax_22 => exp_26, log_22, sub_45, sum_73
# log_softmax_23 => exp_27, log_23, sub_47, sum_75
# log_softmax_24 => exp_28, log_24, sub_49, sum_77
# log_softmax_25 => exp_29, log_25, sub_51, sum_79
# log_softmax_26 => exp_30, log_26, sub_53, sum_81
# log_softmax_27 => exp_31, log_27, sub_55, sum_83
# log_softmax_3 => exp_4, log_3, sub_7, sum_14
# log_softmax_4 => exp_5, log_4, sub_9, sum_16
# log_softmax_5 => exp_6, log_5, sub_11, sum_18
# log_softmax_6 => exp_7, log_6, sub_13, sum_20
# log_softmax_7 => exp_9, log_7, sub_15, sum_29
# log_softmax_8 => exp_10, log_8, sub_17, sum_31
# log_softmax_9 => exp_11, log_9, sub_19, sum_33
# loss => add_7
# loss_1 => add_15
# loss_2 => add_23
# loss_3 => add_31
# mean => mean
# mean_1 => mean_1
# mean_10 => mean_10
# mean_11 => mean_11
# mean_12 => mean_12
# mean_13 => mean_13
# mean_14 => mean_14
# mean_15 => mean_15
# mean_16 => mean_16
# mean_17 => mean_17
# mean_18 => mean_18
# mean_19 => mean_19
# mean_2 => mean_2
# mean_20 => mean_20
# mean_21 => mean_21
# mean_22 => mean_22
# mean_23 => mean_23
# mean_24 => mean_24
# mean_25 => mean_25
# mean_26 => mean_26
# mean_27 => mean_27
# mean_3 => mean_3
# mean_4 => mean_4
# mean_5 => mean_5
# mean_6 => mean_6
# mean_7 => mean_7
# mean_8 => mean_8
# mean_9 => mean_9
# mul => mul_1
# mul_1 => mul_2
# mul_10 => mul_12
# mul_11 => mul_13
# mul_12 => mul_14
# mul_13 => mul_15
# mul_14 => mul_17
# mul_15 => mul_18
# mul_16 => mul_19
# mul_17 => mul_20
# mul_18 => mul_21
# mul_19 => mul_22
# mul_2 => mul_3
# mul_20 => mul_23
# mul_21 => mul_25
# mul_22 => mul_26
# mul_23 => mul_27
# mul_24 => mul_28
# mul_25 => mul_29
# mul_26 => mul_30
# mul_27 => mul_31
# mul_3 => mul_4
# mul_4 => mul_5
# mul_5 => mul_6
# mul_6 => mul_7
# mul_7 => mul_9
# mul_8 => mul_10
# mul_9 => mul_11
# neg => neg
# neg_1 => neg_1
# neg_10 => neg_10
# neg_11 => neg_11
# neg_12 => neg_12
# neg_13 => neg_13
# neg_14 => neg_14
# neg_15 => neg_15
# neg_16 => neg_16
# neg_17 => neg_17
# neg_18 => neg_18
# neg_19 => neg_19
# neg_2 => neg_2
# neg_20 => neg_20
# neg_21 => neg_21
# neg_22 => neg_22
# neg_23 => neg_23
# neg_24 => neg_24
# neg_25 => neg_25
# neg_26 => neg_26
# neg_27 => neg_27
# neg_3 => neg_3
# neg_4 => neg_4
# neg_5 => neg_5
# neg_6 => neg_6
# neg_7 => neg_7
# neg_8 => neg_8
# neg_9 => neg_9
# subloss => add
# subloss_1 => add_1
# subloss_10 => add_11
# subloss_11 => add_12
# subloss_12 => add_13
# subloss_13 => add_14
# subloss_14 => add_16
# subloss_15 => add_17
# subloss_16 => add_18
# subloss_17 => add_19
# subloss_18 => add_20
# subloss_19 => add_21
# subloss_2 => add_2
# subloss_20 => add_22
# subloss_21 => add_24
# subloss_22 => add_25
# subloss_23 => add_26
# subloss_24 => add_27
# subloss_25 => add_28
# subloss_26 => add_29
# subloss_27 => add_30
# subloss_3 => add_3
# subloss_4 => add_4
# subloss_5 => add_5
# subloss_6 => add_6
# subloss_7 => add_8
# subloss_8 => add_9
# subloss_9 => add_10
# sum_10 => sum_13
# sum_11 => sum_15
# sum_12 => sum_17
# sum_13 => sum_19
# sum_14 => sum_21
# sum_22 => sum_30
# sum_23 => sum_32
# sum_24 => sum_34
# sum_25 => sum_36
# sum_26 => sum_38
# sum_27 => sum_40
# sum_28 => sum_42
# sum_36 => sum_51
# sum_37 => sum_53
# sum_38 => sum_55
# sum_39 => sum_57
# sum_40 => sum_59
# sum_41 => sum_61
# sum_42 => sum_63
# sum_50 => sum_72
# sum_51 => sum_74
# sum_52 => sum_76
# sum_53 => sum_78
# sum_54 => sum_80
# sum_55 => sum_82
# sum_56 => sum_84
# sum_8 => sum_9
# sum_9 => sum_11
# truediv_17 => div_43
# truediv_26 => div_65
# truediv_35 => div_87
# truediv_36 => div_88
# truediv_8 => div_21
# Graph fragment:
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_27,), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_8,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_27, %log), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_1), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_9,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, 0.0), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_26,), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_10,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_26, %log_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_3), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_11,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %neg_1), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_25,), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_12,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_25, %log_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_5), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_13,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_2,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %neg_2), kwargs = {})
# %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_24,), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [1], True), kwargs = {})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_14,), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_24, %log_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_7), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_15,), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_3,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %neg_3), kwargs = {})
# %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_23,), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_5, [1], True), kwargs = {})
# %log_4 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_16,), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_23, %log_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_9), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [1]), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_17,), kwargs = {})
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_4,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %neg_4), kwargs = {})
# %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_22,), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_6, [1], True), kwargs = {})
# %log_5 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_18,), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_22, %log_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_11), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [1]), kwargs = {})
# %mean_5 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_19,), kwargs = {})
# %neg_5 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_5,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %neg_5), kwargs = {})
# %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_21,), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_7, [1], True), kwargs = {})
# %log_6 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_20,), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_21, %log_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_13), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [1]), kwargs = {})
# %mean_6 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_21,), kwargs = {})
# %neg_6 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_6,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %neg_6), kwargs = {})
# %div_21 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_6, 7), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_21, 0.0), kwargs = {})
# %exp_9 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_20,), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_9, [1], True), kwargs = {})
# %log_7 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_29,), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_20, %log_7), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_15), kwargs = {})
# %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [1]), kwargs = {})
# %mean_7 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_30,), kwargs = {})
# %neg_7 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_7,), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_7, 0.0), kwargs = {})
# %exp_10 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_19,), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_10, [1], True), kwargs = {})
# %log_8 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_31,), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_19, %log_8), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_17), kwargs = {})
# %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [1]), kwargs = {})
# %mean_8 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_32,), kwargs = {})
# %neg_8 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_8,), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %neg_8), kwargs = {})
# %exp_11 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_18,), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_11, [1], True), kwargs = {})
# %log_9 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_33,), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_18, %log_9), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_19), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_11, [1]), kwargs = {})
# %mean_9 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_34,), kwargs = {})
# %neg_9 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_9,), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %neg_9), kwargs = {})
# %exp_12 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_17,), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_12, [1], True), kwargs = {})
# %log_10 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_35,), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_17, %log_10), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_21), kwargs = {})
# %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_12, [1]), kwargs = {})
# %mean_10 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_36,), kwargs = {})
# %neg_10 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_10,), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %neg_10), kwargs = {})
# %exp_13 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_16,), kwargs = {})
# %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_13, [1], True), kwargs = {})
# %log_11 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_37,), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_16, %log_11), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_23), kwargs = {})
# %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_13, [1]), kwargs = {})
# %mean_11 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_38,), kwargs = {})
# %neg_11 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_11,), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %neg_11), kwargs = {})
# %exp_14 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_15,), kwargs = {})
# %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_14, [1], True), kwargs = {})
# %log_12 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_39,), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_15, %log_12), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_25), kwargs = {})
# %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_14, [1]), kwargs = {})
# %mean_12 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_40,), kwargs = {})
# %neg_12 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_12,), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %neg_12), kwargs = {})
# %exp_15 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_14,), kwargs = {})
# %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_15, [1], True), kwargs = {})
# %log_13 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_41,), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_14, %log_13), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_27), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_15, [1]), kwargs = {})
# %mean_13 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_42,), kwargs = {})
# %neg_13 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_13,), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %neg_13), kwargs = {})
# %div_43 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_14, 7), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %div_43), kwargs = {})
# %exp_17 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_13,), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_17, [1], True), kwargs = {})
# %log_14 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_50,), kwargs = {})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_13, %log_14), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_29), kwargs = {})
# %sum_51 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_17, [1]), kwargs = {})
# %mean_14 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_51,), kwargs = {})
# %neg_14 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_14,), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_14, 0.0), kwargs = {})
# %exp_18 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_12,), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_18, [1], True), kwargs = {})
# %log_15 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_52,), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_12, %log_15), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_31), kwargs = {})
# %sum_53 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_18, [1]), kwargs = {})
# %mean_15 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_53,), kwargs = {})
# %neg_15 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_15,), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %neg_15), kwargs = {})
# %exp_19 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_11,), kwargs = {})
# %sum_54 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_19, [1], True), kwargs = {})
# %log_16 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_54,), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_11, %log_16), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_33), kwargs = {})
# %sum_55 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_19, [1]), kwargs = {})
# %mean_16 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_55,), kwargs = {})
# %neg_16 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_16,), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %neg_16), kwargs = {})
# %exp_20 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_10,), kwargs = {})
# %sum_56 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_20, [1], True), kwargs = {})
# %log_17 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_56,), kwargs = {})
# %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_10, %log_17), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_35), kwargs = {})
# %sum_57 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_20, [1]), kwargs = {})
# %mean_17 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_57,), kwargs = {})
# %neg_17 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_17,), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_18, %neg_17), kwargs = {})
# %exp_21 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_9,), kwargs = {})
# %sum_58 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_21, [1], True), kwargs = {})
# %log_18 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_58,), kwargs = {})
# %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_9, %log_18), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_37), kwargs = {})
# %sum_59 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_21, [1]), kwargs = {})
# %mean_18 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_59,), kwargs = {})
# %neg_18 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_18,), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %neg_18), kwargs = {})
# %exp_22 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_8,), kwargs = {})
# %sum_60 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_22, [1], True), kwargs = {})
# %log_19 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_60,), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_8, %log_19), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_39), kwargs = {})
# %sum_61 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_22, [1]), kwargs = {})
# %mean_19 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_61,), kwargs = {})
# %neg_19 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_19,), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %neg_19), kwargs = {})
# %exp_23 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_7,), kwargs = {})
# %sum_62 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_23, [1], True), kwargs = {})
# %log_20 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_62,), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_7, %log_20), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_41), kwargs = {})
# %sum_63 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_23, [1]), kwargs = {})
# %mean_20 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_63,), kwargs = {})
# %neg_20 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_20,), kwargs = {})
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_21, %neg_20), kwargs = {})
# %div_65 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_22, 7), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_15, %div_65), kwargs = {})
# %exp_25 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_6,), kwargs = {})
# %sum_71 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_25, [1], True), kwargs = {})
# %log_21 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_71,), kwargs = {})
# %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_6, %log_21), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_43), kwargs = {})
# %sum_72 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_25, [1]), kwargs = {})
# %mean_21 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_72,), kwargs = {})
# %neg_21 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_21,), kwargs = {})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_21, 0.0), kwargs = {})
# %exp_26 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_5,), kwargs = {})
# %sum_73 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_26, [1], True), kwargs = {})
# %log_22 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_73,), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_5, %log_22), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_45), kwargs = {})
# %sum_74 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_26, [1]), kwargs = {})
# %mean_22 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_74,), kwargs = {})
# %neg_22 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_22,), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_24, %neg_22), kwargs = {})
# %exp_27 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_4,), kwargs = {})
# %sum_75 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_27, [1], True), kwargs = {})
# %log_23 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_75,), kwargs = {})
# %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_4, %log_23), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_47), kwargs = {})
# %sum_76 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_27, [1]), kwargs = {})
# %mean_23 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_76,), kwargs = {})
# %neg_23 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_23,), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %neg_23), kwargs = {})
# %exp_28 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
# %sum_77 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_28, [1], True), kwargs = {})
# %log_24 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_77,), kwargs = {})
# %sub_49 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_3, %log_24), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_49), kwargs = {})
# %sum_78 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_28, [1]), kwargs = {})
# %mean_24 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_78,), kwargs = {})
# %neg_24 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_24,), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %neg_24), kwargs = {})
# %exp_29 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_2,), kwargs = {})
# %sum_79 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_29, [1], True), kwargs = {})
# %log_25 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_79,), kwargs = {})
# %sub_51 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_2, %log_25), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_51), kwargs = {})
# %sum_80 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_29, [1]), kwargs = {})
# %mean_25 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_80,), kwargs = {})
# %neg_25 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_25,), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_27, %neg_25), kwargs = {})
# %exp_30 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_81 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_30, [1], True), kwargs = {})
# %log_26 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_81,), kwargs = {})
# %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log_26), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_53), kwargs = {})
# %sum_82 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_30, [1]), kwargs = {})
# %mean_26 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_82,), kwargs = {})
# %neg_26 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_26,), kwargs = {})
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_28, %neg_26), kwargs = {})
# %exp_31 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_83 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_31, [1], True), kwargs = {})
# %log_27 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_83,), kwargs = {})
# %sub_55 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor, %log_27), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_55), kwargs = {})
# %sum_84 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_31, [1]), kwargs = {})
# %mean_27 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_84,), kwargs = {})
# %neg_27 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_27,), kwargs = {})
# %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_29, %neg_27), kwargs = {})
# %div_87 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_30, 7), kwargs = {})
# %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_23, %div_87), kwargs = {})
# %div_88 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_31, 4), kwargs = {})
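# In plain terms (a reading of the graph above, not generated output): the
# fused reduction accumulates, for each of the four sinkhorn code tensors q,
# seven terms of the form -mean(sum(q * log_softmax(z), dim=1)) over the other
# views z, divides each group of seven by 7 (n_crops - 1), and divides the
# grand total by 4 (the number of high resolution crops).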
triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31 = async_compile.triton('triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: 'i32', 34: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {33: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), equal_to_1=(33,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 128, 'num_reduction': 28, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr3 + (4*r0), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr3 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr3 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr3 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr4 + (4*r0), None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr4 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp87 = tl.load(in_ptr4 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr4 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr5 + (4*r0), None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr5 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr5 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp116 = tl.load(in_ptr5 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp134 = tl.load(in_ptr6 + (4*r0), None, eviction_policy='evict_last')
tmp136 = tl.load(in_ptr6 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp139 = tl.load(in_ptr6 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp142 = tl.load(in_ptr6 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp160 = tl.load(in_ptr7 + (4*r0), None, eviction_policy='evict_last')
tmp162 = tl.load(in_ptr7 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp165 = tl.load(in_ptr7 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp168 = tl.load(in_ptr7 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp186 = tl.load(in_ptr8 + (4*r0), None, eviction_policy='evict_last')
tmp187 = tl.load(in_ptr9 + (4*r0), None, eviction_policy='evict_last')
tmp189 = tl.load(in_ptr9 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp192 = tl.load(in_ptr9 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp195 = tl.load(in_ptr9 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp201 = tl.load(in_ptr8 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp205 = tl.load(in_ptr8 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp209 = tl.load(in_ptr8 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp216 = tl.load(in_ptr10 + (4*r0), None, eviction_policy='evict_last')
tmp218 = tl.load(in_ptr10 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp221 = tl.load(in_ptr10 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp224 = tl.load(in_ptr10 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp242 = tl.load(in_ptr11 + (4*r0), None, eviction_policy='evict_last')
tmp244 = tl.load(in_ptr11 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp247 = tl.load(in_ptr11 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp250 = tl.load(in_ptr11 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp268 = tl.load(in_ptr12 + (4*r0), None, eviction_policy='evict_last')
tmp270 = tl.load(in_ptr12 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp273 = tl.load(in_ptr12 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp276 = tl.load(in_ptr12 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp294 = tl.load(in_ptr13 + (4*r0), None, eviction_policy='evict_last')
tmp296 = tl.load(in_ptr13 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp299 = tl.load(in_ptr13 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp302 = tl.load(in_ptr13 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp320 = tl.load(in_ptr14 + (4*r0), None, eviction_policy='evict_last')
tmp322 = tl.load(in_ptr14 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp325 = tl.load(in_ptr14 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp328 = tl.load(in_ptr14 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp346 = tl.load(in_ptr15 + (4*r0), None, eviction_policy='evict_last')
tmp348 = tl.load(in_ptr15 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp351 = tl.load(in_ptr15 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp354 = tl.load(in_ptr15 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp372 = tl.load(in_ptr16 + (4*r0), None, eviction_policy='evict_last')
tmp373 = tl.load(in_ptr17 + (4*r0), None, eviction_policy='evict_last')
tmp375 = tl.load(in_ptr17 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp378 = tl.load(in_ptr17 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp381 = tl.load(in_ptr17 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp387 = tl.load(in_ptr16 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp391 = tl.load(in_ptr16 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp395 = tl.load(in_ptr16 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp402 = tl.load(in_ptr18 + (4*r0), None, eviction_policy='evict_last')
tmp404 = tl.load(in_ptr18 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp407 = tl.load(in_ptr18 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp410 = tl.load(in_ptr18 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp428 = tl.load(in_ptr19 + (4*r0), None, eviction_policy='evict_last')
tmp430 = tl.load(in_ptr19 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp433 = tl.load(in_ptr19 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp436 = tl.load(in_ptr19 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp454 = tl.load(in_ptr20 + (4*r0), None, eviction_policy='evict_last')
tmp456 = tl.load(in_ptr20 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp459 = tl.load(in_ptr20 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp462 = tl.load(in_ptr20 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp480 = tl.load(in_ptr21 + (4*r0), None, eviction_policy='evict_last')
tmp482 = tl.load(in_ptr21 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp485 = tl.load(in_ptr21 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp488 = tl.load(in_ptr21 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp506 = tl.load(in_ptr22 + (4*r0), None, eviction_policy='evict_last')
tmp508 = tl.load(in_ptr22 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp511 = tl.load(in_ptr22 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp514 = tl.load(in_ptr22 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp532 = tl.load(in_ptr23 + (4*r0), None, eviction_policy='evict_last')
tmp534 = tl.load(in_ptr23 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp537 = tl.load(in_ptr23 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp540 = tl.load(in_ptr23 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp558 = tl.load(in_ptr24 + (4*r0), None, eviction_policy='evict_last')
tmp559 = tl.load(in_ptr25 + (4*r0), None, eviction_policy='evict_last')
tmp561 = tl.load(in_ptr25 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp564 = tl.load(in_ptr25 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp567 = tl.load(in_ptr25 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp573 = tl.load(in_ptr24 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp577 = tl.load(in_ptr24 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp581 = tl.load(in_ptr24 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp588 = tl.load(in_ptr26 + (4*r0), None, eviction_policy='evict_last')
tmp590 = tl.load(in_ptr26 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp593 = tl.load(in_ptr26 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp596 = tl.load(in_ptr26 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp614 = tl.load(in_ptr27 + (4*r0), None, eviction_policy='evict_last')
tmp616 = tl.load(in_ptr27 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp619 = tl.load(in_ptr27 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp622 = tl.load(in_ptr27 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp640 = tl.load(in_ptr28 + (4*r0), None, eviction_policy='evict_last')
tmp642 = tl.load(in_ptr28 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp645 = tl.load(in_ptr28 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp648 = tl.load(in_ptr28 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp666 = tl.load(in_ptr29 + (4*r0), None, eviction_policy='evict_last')
tmp668 = tl.load(in_ptr29 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp671 = tl.load(in_ptr29 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp674 = tl.load(in_ptr29 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp692 = tl.load(in_ptr30 + (4*r0), None, eviction_policy='evict_last')
tmp694 = tl.load(in_ptr30 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp697 = tl.load(in_ptr30 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp700 = tl.load(in_ptr30 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp718 = tl.load(in_ptr31 + (4*r0), None, eviction_policy='evict_last')
tmp720 = tl.load(in_ptr31 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp723 = tl.load(in_ptr31 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp726 = tl.load(in_ptr31 + (3 + (4*r0)), None, eviction_policy='evict_last')
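    # The block below repeats one pattern 28 times (num_reduction = 28 in the
    # metadata above): a 4-way log-softmax over one view's logits, a weighted
    # sum against the matching code tensor, and a row reduction via tl.sum.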
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp31 = tl_math.exp(tmp30)
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tl_math.log(tmp40)
tmp42 = tmp30 - tmp41
tmp43 = tmp0 * tmp42
tmp44 = tmp32 - tmp41
tmp45 = tmp15 * tmp44
tmp46 = tmp43 + tmp45
tmp47 = tmp35 - tmp41
tmp48 = tmp19 * tmp47
tmp49 = tmp46 + tmp48
tmp50 = tmp38 - tmp41
tmp51 = tmp23 * tmp50
tmp52 = tmp49 + tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = tl.sum(tmp53, 1)[:, None]
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp56 - tmp67
tmp69 = tmp0 * tmp68
tmp70 = tmp58 - tmp67
tmp71 = tmp15 * tmp70
tmp72 = tmp69 + tmp71
tmp73 = tmp61 - tmp67
tmp74 = tmp19 * tmp73
tmp75 = tmp72 + tmp74
tmp76 = tmp64 - tmp67
tmp77 = tmp23 * tmp76
tmp78 = tmp75 + tmp77
tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK])
tmp81 = tl.sum(tmp79, 1)[:, None]
tmp83 = tl_math.exp(tmp82)
tmp85 = tl_math.exp(tmp84)
tmp86 = tmp83 + tmp85
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp86 + tmp88
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tl_math.log(tmp92)
tmp94 = tmp82 - tmp93
tmp95 = tmp0 * tmp94
tmp96 = tmp84 - tmp93
tmp97 = tmp15 * tmp96
tmp98 = tmp95 + tmp97
tmp99 = tmp87 - tmp93
tmp100 = tmp19 * tmp99
tmp101 = tmp98 + tmp100
tmp102 = tmp90 - tmp93
tmp103 = tmp23 * tmp102
tmp104 = tmp101 + tmp103
tmp105 = tl.broadcast_to(tmp104, [XBLOCK, RBLOCK])
tmp107 = tl.sum(tmp105, 1)[:, None]
tmp109 = tl_math.exp(tmp108)
tmp111 = tl_math.exp(tmp110)
tmp112 = tmp109 + tmp111
tmp114 = tl_math.exp(tmp113)
tmp115 = tmp112 + tmp114
tmp117 = tl_math.exp(tmp116)
tmp118 = tmp115 + tmp117
tmp119 = tl_math.log(tmp118)
tmp120 = tmp108 - tmp119
tmp121 = tmp0 * tmp120
tmp122 = tmp110 - tmp119
tmp123 = tmp15 * tmp122
tmp124 = tmp121 + tmp123
tmp125 = tmp113 - tmp119
tmp126 = tmp19 * tmp125
tmp127 = tmp124 + tmp126
tmp128 = tmp116 - tmp119
tmp129 = tmp23 * tmp128
tmp130 = tmp127 + tmp129
tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp133 = tl.sum(tmp131, 1)[:, None]
tmp135 = tl_math.exp(tmp134)
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tmp143 = tl_math.exp(tmp142)
tmp144 = tmp141 + tmp143
tmp145 = tl_math.log(tmp144)
tmp146 = tmp134 - tmp145
tmp147 = tmp0 * tmp146
tmp148 = tmp136 - tmp145
tmp149 = tmp15 * tmp148
tmp150 = tmp147 + tmp149
tmp151 = tmp139 - tmp145
tmp152 = tmp19 * tmp151
tmp153 = tmp150 + tmp152
tmp154 = tmp142 - tmp145
tmp155 = tmp23 * tmp154
tmp156 = tmp153 + tmp155
tmp157 = tl.broadcast_to(tmp156, [XBLOCK, RBLOCK])
tmp159 = tl.sum(tmp157, 1)[:, None]
tmp161 = tl_math.exp(tmp160)
tmp163 = tl_math.exp(tmp162)
tmp164 = tmp161 + tmp163
tmp166 = tl_math.exp(tmp165)
tmp167 = tmp164 + tmp166
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp167 + tmp169
tmp171 = tl_math.log(tmp170)
tmp172 = tmp160 - tmp171
tmp173 = tmp0 * tmp172
tmp174 = tmp162 - tmp171
tmp175 = tmp15 * tmp174
tmp176 = tmp173 + tmp175
tmp177 = tmp165 - tmp171
tmp178 = tmp19 * tmp177
tmp179 = tmp176 + tmp178
tmp180 = tmp168 - tmp171
tmp181 = tmp23 * tmp180
tmp182 = tmp179 + tmp181
tmp183 = tl.broadcast_to(tmp182, [XBLOCK, RBLOCK])
tmp185 = tl.sum(tmp183, 1)[:, None]
tmp188 = tl_math.exp(tmp187)
tmp190 = tl_math.exp(tmp189)
tmp191 = tmp188 + tmp190
tmp193 = tl_math.exp(tmp192)
tmp194 = tmp191 + tmp193
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp194 + tmp196
tmp198 = tl_math.log(tmp197)
tmp199 = tmp187 - tmp198
tmp200 = tmp186 * tmp199
tmp202 = tmp189 - tmp198
tmp203 = tmp201 * tmp202
tmp204 = tmp200 + tmp203
tmp206 = tmp192 - tmp198
tmp207 = tmp205 * tmp206
tmp208 = tmp204 + tmp207
tmp210 = tmp195 - tmp198
tmp211 = tmp209 * tmp210
tmp212 = tmp208 + tmp211
tmp213 = tl.broadcast_to(tmp212, [XBLOCK, RBLOCK])
tmp215 = tl.sum(tmp213, 1)[:, None]
tmp217 = tl_math.exp(tmp216)
tmp219 = tl_math.exp(tmp218)
tmp220 = tmp217 + tmp219
tmp222 = tl_math.exp(tmp221)
tmp223 = tmp220 + tmp222
tmp225 = tl_math.exp(tmp224)
tmp226 = tmp223 + tmp225
tmp227 = tl_math.log(tmp226)
tmp228 = tmp216 - tmp227
tmp229 = tmp186 * tmp228
tmp230 = tmp218 - tmp227
tmp231 = tmp201 * tmp230
tmp232 = tmp229 + tmp231
tmp233 = tmp221 - tmp227
tmp234 = tmp205 * tmp233
tmp235 = tmp232 + tmp234
tmp236 = tmp224 - tmp227
tmp237 = tmp209 * tmp236
tmp238 = tmp235 + tmp237
tmp239 = tl.broadcast_to(tmp238, [XBLOCK, RBLOCK])
tmp241 = tl.sum(tmp239, 1)[:, None]
tmp243 = tl_math.exp(tmp242)
tmp245 = tl_math.exp(tmp244)
tmp246 = tmp243 + tmp245
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp246 + tmp248
tmp251 = tl_math.exp(tmp250)
tmp252 = tmp249 + tmp251
tmp253 = tl_math.log(tmp252)
tmp254 = tmp242 - tmp253
tmp255 = tmp186 * tmp254
tmp256 = tmp244 - tmp253
tmp257 = tmp201 * tmp256
tmp258 = tmp255 + tmp257
tmp259 = tmp247 - tmp253
tmp260 = tmp205 * tmp259
tmp261 = tmp258 + tmp260
tmp262 = tmp250 - tmp253
tmp263 = tmp209 * tmp262
tmp264 = tmp261 + tmp263
tmp265 = tl.broadcast_to(tmp264, [XBLOCK, RBLOCK])
tmp267 = tl.sum(tmp265, 1)[:, None]
tmp269 = tl_math.exp(tmp268)
tmp271 = tl_math.exp(tmp270)
tmp272 = tmp269 + tmp271
tmp274 = tl_math.exp(tmp273)
tmp275 = tmp272 + tmp274
tmp277 = tl_math.exp(tmp276)
tmp278 = tmp275 + tmp277
tmp279 = tl_math.log(tmp278)
tmp280 = tmp268 - tmp279
tmp281 = tmp186 * tmp280
tmp282 = tmp270 - tmp279
tmp283 = tmp201 * tmp282
tmp284 = tmp281 + tmp283
tmp285 = tmp273 - tmp279
tmp286 = tmp205 * tmp285
tmp287 = tmp284 + tmp286
tmp288 = tmp276 - tmp279
tmp289 = tmp209 * tmp288
tmp290 = tmp287 + tmp289
tmp291 = tl.broadcast_to(tmp290, [XBLOCK, RBLOCK])
tmp293 = tl.sum(tmp291, 1)[:, None]
tmp295 = tl_math.exp(tmp294)
tmp297 = tl_math.exp(tmp296)
tmp298 = tmp295 + tmp297
tmp300 = tl_math.exp(tmp299)
tmp301 = tmp298 + tmp300
tmp303 = tl_math.exp(tmp302)
tmp304 = tmp301 + tmp303
tmp305 = tl_math.log(tmp304)
tmp306 = tmp294 - tmp305
tmp307 = tmp186 * tmp306
tmp308 = tmp296 - tmp305
tmp309 = tmp201 * tmp308
tmp310 = tmp307 + tmp309
tmp311 = tmp299 - tmp305
tmp312 = tmp205 * tmp311
tmp313 = tmp310 + tmp312
tmp314 = tmp302 - tmp305
tmp315 = tmp209 * tmp314
tmp316 = tmp313 + tmp315
tmp317 = tl.broadcast_to(tmp316, [XBLOCK, RBLOCK])
tmp319 = tl.sum(tmp317, 1)[:, None]
tmp321 = tl_math.exp(tmp320)
tmp323 = tl_math.exp(tmp322)
tmp324 = tmp321 + tmp323
tmp326 = tl_math.exp(tmp325)
tmp327 = tmp324 + tmp326
tmp329 = tl_math.exp(tmp328)
tmp330 = tmp327 + tmp329
tmp331 = tl_math.log(tmp330)
tmp332 = tmp320 - tmp331
tmp333 = tmp186 * tmp332
tmp334 = tmp322 - tmp331
tmp335 = tmp201 * tmp334
tmp336 = tmp333 + tmp335
tmp337 = tmp325 - tmp331
tmp338 = tmp205 * tmp337
tmp339 = tmp336 + tmp338
tmp340 = tmp328 - tmp331
tmp341 = tmp209 * tmp340
tmp342 = tmp339 + tmp341
tmp343 = tl.broadcast_to(tmp342, [XBLOCK, RBLOCK])
tmp345 = tl.sum(tmp343, 1)[:, None]
tmp347 = tl_math.exp(tmp346)
tmp349 = tl_math.exp(tmp348)
tmp350 = tmp347 + tmp349
tmp352 = tl_math.exp(tmp351)
tmp353 = tmp350 + tmp352
tmp355 = tl_math.exp(tmp354)
tmp356 = tmp353 + tmp355
tmp357 = tl_math.log(tmp356)
tmp358 = tmp346 - tmp357
tmp359 = tmp186 * tmp358
tmp360 = tmp348 - tmp357
tmp361 = tmp201 * tmp360
tmp362 = tmp359 + tmp361
tmp363 = tmp351 - tmp357
tmp364 = tmp205 * tmp363
tmp365 = tmp362 + tmp364
tmp366 = tmp354 - tmp357
tmp367 = tmp209 * tmp366
tmp368 = tmp365 + tmp367
tmp369 = tl.broadcast_to(tmp368, [XBLOCK, RBLOCK])
tmp371 = tl.sum(tmp369, 1)[:, None]
tmp374 = tl_math.exp(tmp373)
tmp376 = tl_math.exp(tmp375)
tmp377 = tmp374 + tmp376
tmp379 = tl_math.exp(tmp378)
tmp380 = tmp377 + tmp379
tmp382 = tl_math.exp(tmp381)
tmp383 = tmp380 + tmp382
tmp384 = tl_math.log(tmp383)
tmp385 = tmp373 - tmp384
tmp386 = tmp372 * tmp385
tmp388 = tmp375 - tmp384
tmp389 = tmp387 * tmp388
tmp390 = tmp386 + tmp389
tmp392 = tmp378 - tmp384
tmp393 = tmp391 * tmp392
tmp394 = tmp390 + tmp393
tmp396 = tmp381 - tmp384
tmp397 = tmp395 * tmp396
tmp398 = tmp394 + tmp397
tmp399 = tl.broadcast_to(tmp398, [XBLOCK, RBLOCK])
tmp401 = tl.sum(tmp399, 1)[:, None]
tmp403 = tl_math.exp(tmp402)
tmp405 = tl_math.exp(tmp404)
tmp406 = tmp403 + tmp405
tmp408 = tl_math.exp(tmp407)
tmp409 = tmp406 + tmp408
tmp411 = tl_math.exp(tmp410)
tmp412 = tmp409 + tmp411
tmp413 = tl_math.log(tmp412)
tmp414 = tmp402 - tmp413
tmp415 = tmp372 * tmp414
tmp416 = tmp404 - tmp413
tmp417 = tmp387 * tmp416
tmp418 = tmp415 + tmp417
tmp419 = tmp407 - tmp413
tmp420 = tmp391 * tmp419
tmp421 = tmp418 + tmp420
tmp422 = tmp410 - tmp413
tmp423 = tmp395 * tmp422
tmp424 = tmp421 + tmp423
tmp425 = tl.broadcast_to(tmp424, [XBLOCK, RBLOCK])
tmp427 = tl.sum(tmp425, 1)[:, None]
tmp429 = tl_math.exp(tmp428)
tmp431 = tl_math.exp(tmp430)
tmp432 = tmp429 + tmp431
tmp434 = tl_math.exp(tmp433)
tmp435 = tmp432 + tmp434
tmp437 = tl_math.exp(tmp436)
tmp438 = tmp435 + tmp437
tmp439 = tl_math.log(tmp438)
tmp440 = tmp428 - tmp439
tmp441 = tmp372 * tmp440
tmp442 = tmp430 - tmp439
tmp443 = tmp387 * tmp442
tmp444 = tmp441 + tmp443
tmp445 = tmp433 - tmp439
tmp446 = tmp391 * tmp445
tmp447 = tmp444 + tmp446
tmp448 = tmp436 - tmp439
tmp449 = tmp395 * tmp448
tmp450 = tmp447 + tmp449
tmp451 = tl.broadcast_to(tmp450, [XBLOCK, RBLOCK])
tmp453 = tl.sum(tmp451, 1)[:, None]
tmp455 = tl_math.exp(tmp454)
tmp457 = tl_math.exp(tmp456)
tmp458 = tmp455 + tmp457
tmp460 = tl_math.exp(tmp459)
tmp461 = tmp458 + tmp460
tmp463 = tl_math.exp(tmp462)
tmp464 = tmp461 + tmp463
tmp465 = tl_math.log(tmp464)
tmp466 = tmp454 - tmp465
tmp467 = tmp372 * tmp466
tmp468 = tmp456 - tmp465
tmp469 = tmp387 * tmp468
tmp470 = tmp467 + tmp469
tmp471 = tmp459 - tmp465
tmp472 = tmp391 * tmp471
tmp473 = tmp470 + tmp472
tmp474 = tmp462 - tmp465
tmp475 = tmp395 * tmp474
tmp476 = tmp473 + tmp475
tmp477 = tl.broadcast_to(tmp476, [XBLOCK, RBLOCK])
tmp479 = tl.sum(tmp477, 1)[:, None]
tmp481 = tl_math.exp(tmp480)
tmp483 = tl_math.exp(tmp482)
tmp484 = tmp481 + tmp483
tmp486 = tl_math.exp(tmp485)
tmp487 = tmp484 + tmp486
tmp489 = tl_math.exp(tmp488)
tmp490 = tmp487 + tmp489
tmp491 = tl_math.log(tmp490)
tmp492 = tmp480 - tmp491
tmp493 = tmp372 * tmp492
tmp494 = tmp482 - tmp491
tmp495 = tmp387 * tmp494
tmp496 = tmp493 + tmp495
tmp497 = tmp485 - tmp491
tmp498 = tmp391 * tmp497
tmp499 = tmp496 + tmp498
tmp500 = tmp488 - tmp491
tmp501 = tmp395 * tmp500
tmp502 = tmp499 + tmp501
tmp503 = tl.broadcast_to(tmp502, [XBLOCK, RBLOCK])
tmp505 = tl.sum(tmp503, 1)[:, None]
tmp507 = tl_math.exp(tmp506)
tmp509 = tl_math.exp(tmp508)
tmp510 = tmp507 + tmp509
tmp512 = tl_math.exp(tmp511)
tmp513 = tmp510 + tmp512
tmp515 = tl_math.exp(tmp514)
tmp516 = tmp513 + tmp515
tmp517 = tl_math.log(tmp516)
tmp518 = tmp506 - tmp517
tmp519 = tmp372 * tmp518
tmp520 = tmp508 - tmp517
tmp521 = tmp387 * tmp520
tmp522 = tmp519 + tmp521
tmp523 = tmp511 - tmp517
tmp524 = tmp391 * tmp523
tmp525 = tmp522 + tmp524
tmp526 = tmp514 - tmp517
tmp527 = tmp395 * tmp526
tmp528 = tmp525 + tmp527
tmp529 = tl.broadcast_to(tmp528, [XBLOCK, RBLOCK])
tmp531 = tl.sum(tmp529, 1)[:, None]
tmp533 = tl_math.exp(tmp532)
tmp535 = tl_math.exp(tmp534)
tmp536 = tmp533 + tmp535
tmp538 = tl_math.exp(tmp537)
tmp539 = tmp536 + tmp538
tmp541 = tl_math.exp(tmp540)
tmp542 = tmp539 + tmp541
tmp543 = tl_math.log(tmp542)
tmp544 = tmp532 - tmp543
tmp545 = tmp372 * tmp544
tmp546 = tmp534 - tmp543
tmp547 = tmp387 * tmp546
tmp548 = tmp545 + tmp547
tmp549 = tmp537 - tmp543
tmp550 = tmp391 * tmp549
tmp551 = tmp548 + tmp550
tmp552 = tmp540 - tmp543
tmp553 = tmp395 * tmp552
tmp554 = tmp551 + tmp553
tmp555 = tl.broadcast_to(tmp554, [XBLOCK, RBLOCK])
tmp557 = tl.sum(tmp555, 1)[:, None]
tmp560 = tl_math.exp(tmp559)
tmp562 = tl_math.exp(tmp561)
tmp563 = tmp560 + tmp562
tmp565 = tl_math.exp(tmp564)
tmp566 = tmp563 + tmp565
tmp568 = tl_math.exp(tmp567)
tmp569 = tmp566 + tmp568
tmp570 = tl_math.log(tmp569)
tmp571 = tmp559 - tmp570
tmp572 = tmp558 * tmp571
tmp574 = tmp561 - tmp570
tmp575 = tmp573 * tmp574
tmp576 = tmp572 + tmp575
tmp578 = tmp564 - tmp570
tmp579 = tmp577 * tmp578
tmp580 = tmp576 + tmp579
tmp582 = tmp567 - tmp570
tmp583 = tmp581 * tmp582
tmp584 = tmp580 + tmp583
tmp585 = tl.broadcast_to(tmp584, [XBLOCK, RBLOCK])
tmp587 = tl.sum(tmp585, 1)[:, None]
tmp589 = tl_math.exp(tmp588)
tmp591 = tl_math.exp(tmp590)
tmp592 = tmp589 + tmp591
tmp594 = tl_math.exp(tmp593)
tmp595 = tmp592 + tmp594
tmp597 = tl_math.exp(tmp596)
tmp598 = tmp595 + tmp597
tmp599 = tl_math.log(tmp598)
tmp600 = tmp588 - tmp599
tmp601 = tmp558 * tmp600
tmp602 = tmp590 - tmp599
tmp603 = tmp573 * tmp602
tmp604 = tmp601 + tmp603
tmp605 = tmp593 - tmp599
tmp606 = tmp577 * tmp605
tmp607 = tmp604 + tmp606
tmp608 = tmp596 - tmp599
tmp609 = tmp581 * tmp608
tmp610 = tmp607 + tmp609
tmp611 = tl.broadcast_to(tmp610, [XBLOCK, RBLOCK])
tmp613 = tl.sum(tmp611, 1)[:, None]
tmp615 = tl_math.exp(tmp614)
tmp617 = tl_math.exp(tmp616)
tmp618 = tmp615 + tmp617
tmp620 = tl_math.exp(tmp619)
tmp621 = tmp618 + tmp620
tmp623 = tl_math.exp(tmp622)
tmp624 = tmp621 + tmp623
tmp625 = tl_math.log(tmp624)
tmp626 = tmp614 - tmp625
tmp627 = tmp558 * tmp626
tmp628 = tmp616 - tmp625
tmp629 = tmp573 * tmp628
tmp630 = tmp627 + tmp629
tmp631 = tmp619 - tmp625
tmp632 = tmp577 * tmp631
tmp633 = tmp630 + tmp632
tmp634 = tmp622 - tmp625
tmp635 = tmp581 * tmp634
tmp636 = tmp633 + tmp635
tmp637 = tl.broadcast_to(tmp636, [XBLOCK, RBLOCK])
tmp639 = tl.sum(tmp637, 1)[:, None]
tmp641 = tl_math.exp(tmp640)
tmp643 = tl_math.exp(tmp642)
tmp644 = tmp641 + tmp643
tmp646 = tl_math.exp(tmp645)
tmp647 = tmp644 + tmp646
tmp649 = tl_math.exp(tmp648)
tmp650 = tmp647 + tmp649
tmp651 = tl_math.log(tmp650)
tmp652 = tmp640 - tmp651
tmp653 = tmp558 * tmp652
tmp654 = tmp642 - tmp651
tmp655 = tmp573 * tmp654
tmp656 = tmp653 + tmp655
tmp657 = tmp645 - tmp651
tmp658 = tmp577 * tmp657
tmp659 = tmp656 + tmp658
tmp660 = tmp648 - tmp651
tmp661 = tmp581 * tmp660
tmp662 = tmp659 + tmp661
tmp663 = tl.broadcast_to(tmp662, [XBLOCK, RBLOCK])
tmp665 = tl.sum(tmp663, 1)[:, None]
tmp667 = tl_math.exp(tmp666)
tmp669 = tl_math.exp(tmp668)
tmp670 = tmp667 + tmp669
tmp672 = tl_math.exp(tmp671)
tmp673 = tmp670 + tmp672
tmp675 = tl_math.exp(tmp674)
tmp676 = tmp673 + tmp675
tmp677 = tl_math.log(tmp676)
tmp678 = tmp666 - tmp677
tmp679 = tmp558 * tmp678
tmp680 = tmp668 - tmp677
tmp681 = tmp573 * tmp680
tmp682 = tmp679 + tmp681
tmp683 = tmp671 - tmp677
tmp684 = tmp577 * tmp683
tmp685 = tmp682 + tmp684
tmp686 = tmp674 - tmp677
tmp687 = tmp581 * tmp686
tmp688 = tmp685 + tmp687
tmp689 = tl.broadcast_to(tmp688, [XBLOCK, RBLOCK])
tmp691 = tl.sum(tmp689, 1)[:, None]
tmp693 = tl_math.exp(tmp692)
tmp695 = tl_math.exp(tmp694)
tmp696 = tmp693 + tmp695
tmp698 = tl_math.exp(tmp697)
tmp699 = tmp696 + tmp698
tmp701 = tl_math.exp(tmp700)
tmp702 = tmp699 + tmp701
tmp703 = tl_math.log(tmp702)
tmp704 = tmp692 - tmp703
tmp705 = tmp558 * tmp704
tmp706 = tmp694 - tmp703
tmp707 = tmp573 * tmp706
tmp708 = tmp705 + tmp707
tmp709 = tmp697 - tmp703
tmp710 = tmp577 * tmp709
tmp711 = tmp708 + tmp710
tmp712 = tmp700 - tmp703
tmp713 = tmp581 * tmp712
tmp714 = tmp711 + tmp713
tmp715 = tl.broadcast_to(tmp714, [XBLOCK, RBLOCK])
tmp717 = tl.sum(tmp715, 1)[:, None]
tmp719 = tl_math.exp(tmp718)
tmp721 = tl_math.exp(tmp720)
tmp722 = tmp719 + tmp721
tmp724 = tl_math.exp(tmp723)
tmp725 = tmp722 + tmp724
tmp727 = tl_math.exp(tmp726)
tmp728 = tmp725 + tmp727
tmp729 = tl_math.log(tmp728)
tmp730 = tmp718 - tmp729
tmp731 = tmp558 * tmp730
tmp732 = tmp720 - tmp729
tmp733 = tmp573 * tmp732
tmp734 = tmp731 + tmp733
tmp735 = tmp723 - tmp729
tmp736 = tmp577 * tmp735
tmp737 = tmp734 + tmp736
tmp738 = tmp726 - tmp729
tmp739 = tmp581 * tmp738
tmp740 = tmp737 + tmp739
tmp741 = tl.broadcast_to(tmp740, [XBLOCK, RBLOCK])
tmp743 = tl.sum(tmp741, 1)[:, None]
tmp744 = 4.0
tmp745 = tmp587 / tmp744
tmp746 = -tmp745
tmp747 = 0.0
tmp748 = tmp746 + tmp747
tmp749 = tmp613 / tmp744
tmp750 = -tmp749
tmp751 = tmp748 + tmp750
tmp752 = tmp639 / tmp744
tmp753 = -tmp752
tmp754 = tmp751 + tmp753
tmp755 = tmp665 / tmp744
tmp756 = -tmp755
tmp757 = tmp754 + tmp756
tmp758 = tmp691 / tmp744
tmp759 = -tmp758
tmp760 = tmp757 + tmp759
tmp761 = tmp717 / tmp744
tmp762 = -tmp761
tmp763 = tmp760 + tmp762
tmp764 = tmp743 / tmp744
tmp765 = -tmp764
tmp766 = tmp763 + tmp765
tmp767 = 0.14285714285714285
tmp768 = tmp766 * tmp767
tmp769 = tmp401 / tmp744
tmp770 = -tmp769
tmp771 = tmp770 + tmp747
tmp772 = tmp427 / tmp744
tmp773 = -tmp772
tmp774 = tmp771 + tmp773
tmp775 = tmp453 / tmp744
tmp776 = -tmp775
tmp777 = tmp774 + tmp776
tmp778 = tmp479 / tmp744
tmp779 = -tmp778
tmp780 = tmp777 + tmp779
tmp781 = tmp505 / tmp744
tmp782 = -tmp781
tmp783 = tmp780 + tmp782
tmp784 = tmp531 / tmp744
tmp785 = -tmp784
tmp786 = tmp783 + tmp785
tmp787 = tmp557 / tmp744
tmp788 = -tmp787
tmp789 = tmp786 + tmp788
tmp790 = tmp789 * tmp767
tmp791 = tmp215 / tmp744
tmp792 = -tmp791
tmp793 = tmp792 + tmp747
tmp794 = tmp241 / tmp744
tmp795 = -tmp794
tmp796 = tmp793 + tmp795
tmp797 = tmp267 / tmp744
tmp798 = -tmp797
tmp799 = tmp796 + tmp798
tmp800 = tmp293 / tmp744
tmp801 = -tmp800
tmp802 = tmp799 + tmp801
tmp803 = tmp319 / tmp744
tmp804 = -tmp803
tmp805 = tmp802 + tmp804
tmp806 = tmp345 / tmp744
tmp807 = -tmp806
tmp808 = tmp805 + tmp807
tmp809 = tmp371 / tmp744
tmp810 = -tmp809
tmp811 = tmp808 + tmp810
tmp812 = tmp811 * tmp767
tmp813 = tmp29 / tmp744
tmp814 = -tmp813
tmp815 = tmp814 + tmp747
tmp816 = tmp55 / tmp744
tmp817 = -tmp816
tmp818 = tmp815 + tmp817
tmp819 = tmp81 / tmp744
tmp820 = -tmp819
tmp821 = tmp818 + tmp820
tmp822 = tmp107 / tmp744
tmp823 = -tmp822
tmp824 = tmp821 + tmp823
tmp825 = tmp133 / tmp744
tmp826 = -tmp825
tmp827 = tmp824 + tmp826
tmp828 = tmp159 / tmp744
tmp829 = -tmp828
tmp830 = tmp827 + tmp829
tmp831 = tmp185 / tmp744
tmp832 = -tmp831
tmp833 = tmp830 + tmp832
tmp834 = tmp833 * tmp767
tmp835 = tmp768 + tmp747
tmp836 = tmp835 + tmp790
tmp837 = tmp836 + tmp812
tmp838 = tmp837 + tmp834
tmp839 = 0.25
tmp840 = tmp838 * tmp839
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp840, None)
''', device_str='cuda')
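# Reference sketch (an illustrative assumption, not part of the compiled
# path): the fused reduction above should be numerically equivalent to the
# loop below, where qs holds the four sinkhorn code tensors and zs[i] holds
# the seven temperature-scaled prediction logits paired with qs[i].
def _reference_fused_loss(qs, zs):
    import torch.nn.functional as F
    loss = 0.0
    for q, group in zip(qs, zs):
        subloss = 0.0
        for z in group:
            # cross entropy between codes q and predictions softmax(z)
            subloss += -torch.mean(torch.sum(q * F.log_softmax(z, dim=1),
                dim=1))
        loss += subloss / 7  # n_crops - 1 = 7 for 4 high + 4 low res crops
    return loss / 4  # average over the 4 high resolution crops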
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, buf0, 1, 16, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
triton_poi_fused_sum_1.run(arg0_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
triton_poi_fused_sum_2.run(arg0_1, buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
triton_poi_fused_sum_3.run(arg0_1, buf0, buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
triton_poi_fused_div_4.run(arg0_1, buf0, buf1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf6, buf7, 16, grid=grid(16), stream=stream0)
buf23 = buf0; del buf0 # reuse
buf56 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse
buf79 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q_1], Original ATen: [aten.sum]
triton_per_fused_sum_8.run(arg0_1, buf23, buf56, buf79, 1, 16, grid=grid(1), stream=stream0)
buf24 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_3], Original ATen: [aten.sum]
triton_poi_fused_sum_9.run(arg0_1, buf23, buf24, 4, grid=grid(4), stream=stream0)
buf25 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [sum_17], Original ATen: [aten.sum]
triton_poi_fused_sum_10.run(arg0_1, buf23, buf24, buf25, 4, grid=grid(4), stream=stream0)
buf26 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_4], Original ATen: [aten.sum]
triton_poi_fused_sum_11.run(arg0_1, buf23, buf24, buf25, buf26, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_22], Original ATen: [aten.div]
triton_poi_fused_div_12.run(arg0_1, buf23, buf24, buf25, buf26, buf8, buf27, 16, grid=grid(16), stream=stream0)
buf46 = buf23; del buf23 # reuse
buf81 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q_2], Original ATen: [aten.sum]
triton_per_fused_sum_13.run(arg0_1, buf46, buf81, 1, 16, grid=grid(1), stream=stream0)
buf47 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_6], Original ATen: [aten.sum]
triton_poi_fused_sum_14.run(arg0_1, buf46, buf47, 4, grid=grid(4), stream=stream0)
buf48 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [sum_31], Original ATen: [aten.sum]
triton_poi_fused_sum_15.run(arg0_1, buf46, buf47, buf48, 4, grid=grid(4), stream=stream0)
buf49 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_7], Original ATen: [aten.sum]
triton_poi_fused_sum_16.run(arg0_1, buf46, buf47, buf48, buf49, 4, grid=grid(4), stream=stream0)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf50 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_37], Original ATen: [aten.div]
triton_poi_fused_div_17.run(arg0_1, buf46, buf47, buf48, buf49, buf10, buf33, buf50, 16, grid=grid(16), stream=stream0)
buf69 = buf46; del buf46 # reuse
# Topologically Sorted Source Nodes: [sum_Q_3], Original ATen: [aten.sum]
triton_per_fused_sum_18.run(arg0_1, buf69, 1, 16, grid=grid(1), stream=stream0)
buf70 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_9], Original ATen: [aten.sum]
triton_poi_fused_sum_19.run(arg0_1, buf69, buf70, 4, grid=grid(4), stream=stream0)
buf71 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [sum_45], Original ATen: [aten.sum]
triton_poi_fused_sum_20.run(arg0_1, buf69, buf70, buf71, 4, grid=grid(4), stream=stream0)
buf72 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_10], Original ATen: [aten.sum]
triton_poi_fused_sum_21.run(arg0_1, buf69, buf70, buf71, buf72, 4, grid=grid(4), stream=stream0)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf35 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf58 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf73 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_52], Original ATen: [aten.div]
triton_poi_fused_div_22.run(arg0_1, buf69, buf70, buf71, buf72, buf12, buf35, buf58, buf73, 16, grid=grid(16), stream=stream0)
del buf70
del buf71
del buf72
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf60 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_23.run(arg1_1, buf14, buf37, buf60, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf62 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_24.run(arg1_1, buf16, buf39, buf62, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf64 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_25.run(arg1_1, buf18, buf41, buf64, 16, grid=grid(16), stream=stream0)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf66 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_26.run(arg1_1, buf20, buf43, buf66, 16, grid=grid(16), stream=stream0)
buf28 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_24], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf27, buf28, 16, grid=grid(16), stream=stream0)
buf29 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [Q_26], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf28, buf29, 16, grid=grid(16), stream=stream0)
buf30 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [Q_29], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf29, buf30, 16, grid=grid(16), stream=stream0)
buf31 = reinterpret_tensor(buf29, (4, 4), (4, 1), 0); del buf29 # reuse
buf54 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf77 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_23.run(arg0_1, buf31, buf54, buf77, 16, grid=grid(16), stream=stream0)
del arg0_1
buf51 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_39], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf50, buf51, 16, grid=grid(16), stream=stream0)
buf52 = buf50; del buf50 # reuse
# Topologically Sorted Source Nodes: [Q_41], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf51, buf52, 16, grid=grid(16), stream=stream0)
buf53 = buf51; del buf51 # reuse
# Topologically Sorted Source Nodes: [Q_44], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf52, buf53, 16, grid=grid(16), stream=stream0)
buf74 = buf52; del buf52 # reuse
# Topologically Sorted Source Nodes: [Q_54], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf73, buf74, 16, grid=grid(16), stream=stream0)
buf75 = buf73; del buf73 # reuse
# Topologically Sorted Source Nodes: [Q_56], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf74, buf75, 16, grid=grid(16), stream=stream0)
buf76 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [Q_59], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf75, buf76, 16, grid=grid(16), stream=stream0)
buf83 = reinterpret_tensor(buf75, (4, 4), (4, 1), 0); del buf75 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_27.run(arg1_1, buf83, 16, grid=grid(16), stream=stream0)
buf85 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_28.run(arg1_1, buf85, 16, grid=grid(16), stream=stream0)
buf87 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_29.run(arg1_1, buf87, 16, grid=grid(16), stream=stream0)
buf89 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_30.run(arg1_1, buf89, 16, grid=grid(16), stream=stream0)
del arg1_1
buf11 = buf69; del buf69 # reuse
buf22 = buf11; del buf11 # reuse
buf92 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [log_softmax, mul, sum_8, mean, neg, subloss, log_softmax_1, mul_1, sum_9, mean_1, neg_1, subloss_1, log_softmax_2, mul_2, sum_10, mean_2, neg_2, subloss_2, log_softmax_3, mul_3, sum_11, mean_3, neg_3, subloss_3, log_softmax_4, mul_4, sum_12, mean_4, neg_4, subloss_4, log_softmax_5, mul_5, sum_13, mean_5, neg_5, subloss_5, log_softmax_6, mul_6, sum_14, mean_6, neg_6, subloss_6, truediv_8, loss, log_softmax_7, mul_7, sum_22, mean_7, neg_7, subloss_7, log_softmax_8, mul_8, sum_23, mean_8, neg_8, subloss_8, log_softmax_9, mul_9, sum_24, mean_9, neg_9, subloss_9, log_softmax_10, mul_10, sum_25, mean_10, neg_10, subloss_10, log_softmax_11, mul_11, sum_26, mean_11, neg_11, subloss_11, log_softmax_12, mul_12, sum_27, mean_12, neg_12, subloss_12, log_softmax_13, mul_13, sum_28, mean_13, neg_13, subloss_13, truediv_17, loss_1, log_softmax_14, mul_14, sum_36, mean_14, neg_14, subloss_14, log_softmax_15, mul_15, sum_37, mean_15, neg_15, subloss_15, log_softmax_16, mul_16, sum_38, mean_16, neg_16, subloss_16, log_softmax_17, mul_17, sum_39, mean_17, neg_17, subloss_17, log_softmax_18, mul_18, sum_40, mean_18, neg_18, subloss_18, log_softmax_19, mul_19, sum_41, mean_19, neg_19, subloss_19, log_softmax_20, mul_20, sum_42, mean_20, neg_20, subloss_20, truediv_26, loss_2, log_softmax_21, mul_21, sum_50, mean_21, neg_21, subloss_21, log_softmax_22, mul_22, sum_51, mean_22, neg_22, subloss_22, log_softmax_23, mul_23, sum_52, mean_23, neg_23, subloss_23, log_softmax_24, mul_24, sum_53, mean_24, neg_24, subloss_24, log_softmax_25, mul_25, sum_54, mean_25, neg_25, subloss_25, log_softmax_26, mul_26, sum_55, mean_26, neg_26, subloss_26, log_softmax_27, mul_27, sum_56, mean_27, neg_27, subloss_27, truediv_35, loss_3, truediv_36], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.mean, aten.neg, aten.add, aten.div]
triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31.run(buf92, buf76, buf77, buf79, buf81, buf83, buf85, buf87, buf89, buf53, buf54, buf56, buf58, buf60, buf62, buf64, buf66, buf30, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf7, buf8, buf10, buf12, buf14, buf16, buf18, buf20, 1, 4, grid=grid(1), stream=stream0)
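        # buf92 now holds the final scalar loss: each group of seven sublosses
        # is divided by 7 (n_crops - 1) and the sum over the four code tensors
        # by 4, matching the division-by-7 and division-by-4 nodes documented
        # in the graph fragments above.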
del buf10
del buf12
del buf14
del buf16
del buf18
del buf20
del buf30
del buf31
del buf33
del buf35
del buf37
del buf39
del buf41
del buf43
del buf53
del buf54
del buf56
del buf58
del buf60
del buf62
del buf64
del buf66
del buf7
del buf76
del buf77
del buf79
del buf8
del buf81
del buf83
del buf85
del buf87
del buf89
return (buf92, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from typing import List
import torch.nn as nn
@torch.no_grad()
def sinkhorn(out: 'torch.Tensor', iterations: 'int'=3, epsilon: 'float'=0.05):
"""Distributed sinkhorn algorithm.
As outlined in [0] and implemented in [1].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
[1]: https://github.com/facebookresearch/swav/
Args:
out:
Similarity of the features and the SwaV prototypes.
iterations:
Number of sinkhorn iterations.
epsilon:
Temperature parameter.
Returns:
Soft codes Q assigning each feature to a prototype.
"""
Q = torch.exp(out / epsilon).t()
sum_Q = torch.sum(Q)
Q /= sum_Q
B = Q.shape[1]
K = Q.shape[0]
for i in range(iterations):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
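# Hedged usage sketch (editorial addition, not from the source): the shapes
# below are illustrative 2D [batch, prototypes] similarities. Each row of the
# returned codes sums to 1, because the last in-loop `Q /= B` is cancelled by
# the `Q *= B` after the loop, leaving the columns of Q normalised.
def _sinkhorn_usage_sketch():
    out = torch.randn(8, 16)  # hypothetical batch of prototype similarities
    q = sinkhorn(out, iterations=3, epsilon=0.05)
    assert q.shape == (8, 16)
    assert torch.allclose(q.sum(dim=1), torch.ones(8), atol=1e-5)
    return q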
class SwaVLoss(nn.Module):
"""Implementation of the SwaV loss.
Attributes:
temperature:
Temperature parameter used for cross entropy calculations.
sinkhorn_iterations:
Number of iterations of the sinkhorn algorithm.
sinkhorn_epsilon:
Temperature parameter used in the sinkhorn algorithm.
"""
    def __init__(self, temperature: 'float'=0.1,
                 sinkhorn_iterations: 'int'=3,
                 sinkhorn_epsilon: 'float'=0.05):
super(SwaVLoss, self).__init__()
self.temperature = temperature
self.sinkhorn_iterations = sinkhorn_iterations
self.sinkhorn_epsilon = sinkhorn_epsilon
def subloss(self, z: 'torch.Tensor', q: 'torch.Tensor'):
"""Calculates the cross entropy for the SwaV prediction problem.
Args:
z:
Similarity of the features and the SwaV prototypes.
q:
Codes obtained from Sinkhorn iterations.
Returns:
Cross entropy between predictions z and codes q.
"""
        return -torch.mean(torch.sum(
            q * F.log_softmax(z / self.temperature, dim=1), dim=1))
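    # Note (editorial): this is the soft-target cross entropy
    #   H(q, p) = -mean_b sum_k q[b, k] * log softmax(z[b] / T)[k]
    # with T = self.temperature. In the compiled kernels further below the
    # 1 / T factor shows up as the constant 10.0 (T = 0.1).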
def forward(self, high_resolution_outputs: 'List[torch.Tensor]',
low_resolution_outputs: 'List[torch.Tensor]'):
"""Computes the SwaV loss for a set of high and low resolution outputs.
Args:
high_resolution_outputs:
List of similarities of features and SwaV prototypes for the
high resolution crops.
low_resolution_outputs:
List of similarities of features and SwaV prototypes for the
low resolution crops.
Returns:
Swapping assignments between views loss (SwaV) as described in [0].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
"""
n_crops = len(high_resolution_outputs) + len(low_resolution_outputs)
loss = 0.0
for i in range(len(high_resolution_outputs)):
            with torch.no_grad():
                q = sinkhorn(high_resolution_outputs[i].detach(),
                             iterations=self.sinkhorn_iterations,
                             epsilon=self.sinkhorn_epsilon)
subloss = 0.0
for v in range(len(high_resolution_outputs)):
if v != i:
subloss += self.subloss(high_resolution_outputs[v], q)
for v in range(len(low_resolution_outputs)):
subloss += self.subloss(low_resolution_outputs[v], q)
loss += subloss / (n_crops - 1)
return loss / len(high_resolution_outputs)
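# Hedged usage sketch (editorial addition, not from the source): exercises
# SwaVLoss with 2D [batch, prototypes] crop outputs, which the `t()` call in
# `sinkhorn` expects; the 3D tensors in `get_inputs` below are the dump's
# benchmark shapes, not typical usage.
def _swav_loss_usage_sketch():
    criterion = SwaVLoss(temperature=0.1, sinkhorn_iterations=3,
                         sinkhorn_epsilon=0.05)
    high = [torch.randn(8, 16) for _ in range(2)]  # e.g. two global crops
    low = [torch.randn(8, 16) for _ in range(4)]   # e.g. four local crops
    return criterion(high, low)  # scalar loss tensor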
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
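# Editorial note: this kernel reduces sum(exp(20.0 * x)) over the first 4x4
# slice of the input -- the global sinkhorn normaliser sum_Q, with
# 20.0 = 1 / sinkhorn_epsilon for epsilon = 0.05. A hedged PyTorch
# reference (illustrative only): torch.exp(out / 0.05).sum()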
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
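# Editorial note: triton_poi_fused_sum_1 through triton_poi_fused_mul_7 are,
# in effect, the three sinkhorn iterations unrolled -- alternating per-row
# (stride-4 loads) and per-column (contiguous loads) normalisations of
# Q = exp(20.0 * out) / sum_Q, each with a 0.25 factor (1/K = 1/B = 1/4
# here); the trailing `* 4.0` above realises the final `Q *= B`.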
@triton.jit
def triton_per_fused_sum_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + (16 + r0), None)
tmp9 = tl.load(in_ptr0 + (16 + 4 * r2), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (17 + 4 * r2), None, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (18 + 4 * r2), None, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (19 + 4 * r2), None, eviction_policy='evict_last'
)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
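# Editorial note: from here the same pipeline repeats for the crops at
# offsets 16, 32 and 48. This kernel additionally fuses the log_softmax
# shift: tmp22 = 10.0 * (z - max(z)) per row, i.e. (z - max(z)) / temperature
# with temperature = 0.1; the final reduction kernel completes log_softmax by
# subtracting the log-sum-exp of these shifted values.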
@triton.jit
def triton_poi_fused_sum_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (17 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (18 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (19 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (17 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (18 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (19 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 0)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp32, xmask)
@triton.jit
def triton_per_fused_sum_13(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + (32 + r0), None)
tmp9 = tl.load(in_ptr0 + (32 + 4 * r2), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (33 + 4 * r2), None, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (34 + 4 * r2), None, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (35 + 4 * r2), None, eviction_policy='evict_last'
)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (33 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (34 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (35 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (33 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (34 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (35 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 0)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp32, xmask)
@triton.jit
def triton_per_fused_sum_18(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (48 + r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (49 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (50 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (51 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (49 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (50 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (51 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 0)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
tl.store(out_ptr3 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused_23(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
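# Editorial note: triton_poi_fused_23 through triton_poi_fused_30 all compute
# the same per-row shift 10.0 * (z - max(z)) for the crops at offsets 0, 16,
# 32 and 48, duplicated into several buffers because each shifted crop feeds
# multiple sublosses in the reduction kernel below.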
@triton.jit
def triton_poi_fused_24(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (17 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (18 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (19 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (33 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (34 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (35 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_26(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (49 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (50 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (51 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_27(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_28(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (17 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (18 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (19 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_29(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (33 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (34 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (35 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_30(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (49 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (50 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (51 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
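# Editorial note: the fused reduction below takes 32 inputs -- per crop, one
# sinkhorn codes buffer plus seven shifted-logit buffers. For each pair it
# subtracts log(sum(exp(shifted))) to complete the log_softmax, accumulates
# sum(q * log_softmax) over the 4 prototypes, reduces over the 4 rows, and
# (per the fused op list: mean, neg, add, div) negates, means and combines
# the 28 sublosses with the / 7 and / 4 divisions into the scalar in buf92.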
@triton.jit
def triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14,
in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21,
in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28,
in_ptr29, in_ptr30, in_ptr31, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr4 + 4 * r0, None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr4 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp87 = tl.load(in_ptr4 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr4 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr5 + 4 * r0, None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr5 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp113 = tl.load(in_ptr5 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp116 = tl.load(in_ptr5 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp134 = tl.load(in_ptr6 + 4 * r0, None, eviction_policy='evict_last')
tmp136 = tl.load(in_ptr6 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp139 = tl.load(in_ptr6 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp142 = tl.load(in_ptr6 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp160 = tl.load(in_ptr7 + 4 * r0, None, eviction_policy='evict_last')
tmp162 = tl.load(in_ptr7 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp165 = tl.load(in_ptr7 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp168 = tl.load(in_ptr7 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp186 = tl.load(in_ptr8 + 4 * r0, None, eviction_policy='evict_last')
tmp187 = tl.load(in_ptr9 + 4 * r0, None, eviction_policy='evict_last')
tmp189 = tl.load(in_ptr9 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp192 = tl.load(in_ptr9 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp195 = tl.load(in_ptr9 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp201 = tl.load(in_ptr8 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp205 = tl.load(in_ptr8 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp209 = tl.load(in_ptr8 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp216 = tl.load(in_ptr10 + 4 * r0, None, eviction_policy='evict_last')
tmp218 = tl.load(in_ptr10 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp221 = tl.load(in_ptr10 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp224 = tl.load(in_ptr10 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp242 = tl.load(in_ptr11 + 4 * r0, None, eviction_policy='evict_last')
tmp244 = tl.load(in_ptr11 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp247 = tl.load(in_ptr11 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp250 = tl.load(in_ptr11 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp268 = tl.load(in_ptr12 + 4 * r0, None, eviction_policy='evict_last')
tmp270 = tl.load(in_ptr12 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp273 = tl.load(in_ptr12 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp276 = tl.load(in_ptr12 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp294 = tl.load(in_ptr13 + 4 * r0, None, eviction_policy='evict_last')
tmp296 = tl.load(in_ptr13 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp299 = tl.load(in_ptr13 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp302 = tl.load(in_ptr13 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp320 = tl.load(in_ptr14 + 4 * r0, None, eviction_policy='evict_last')
tmp322 = tl.load(in_ptr14 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp325 = tl.load(in_ptr14 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp328 = tl.load(in_ptr14 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp346 = tl.load(in_ptr15 + 4 * r0, None, eviction_policy='evict_last')
tmp348 = tl.load(in_ptr15 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp351 = tl.load(in_ptr15 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp354 = tl.load(in_ptr15 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp372 = tl.load(in_ptr16 + 4 * r0, None, eviction_policy='evict_last')
tmp373 = tl.load(in_ptr17 + 4 * r0, None, eviction_policy='evict_last')
tmp375 = tl.load(in_ptr17 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp378 = tl.load(in_ptr17 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp381 = tl.load(in_ptr17 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp387 = tl.load(in_ptr16 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp391 = tl.load(in_ptr16 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp395 = tl.load(in_ptr16 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp402 = tl.load(in_ptr18 + 4 * r0, None, eviction_policy='evict_last')
tmp404 = tl.load(in_ptr18 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp407 = tl.load(in_ptr18 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp410 = tl.load(in_ptr18 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp428 = tl.load(in_ptr19 + 4 * r0, None, eviction_policy='evict_last')
tmp430 = tl.load(in_ptr19 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp433 = tl.load(in_ptr19 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp436 = tl.load(in_ptr19 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp454 = tl.load(in_ptr20 + 4 * r0, None, eviction_policy='evict_last')
tmp456 = tl.load(in_ptr20 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp459 = tl.load(in_ptr20 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp462 = tl.load(in_ptr20 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp480 = tl.load(in_ptr21 + 4 * r0, None, eviction_policy='evict_last')
tmp482 = tl.load(in_ptr21 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp485 = tl.load(in_ptr21 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp488 = tl.load(in_ptr21 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp506 = tl.load(in_ptr22 + 4 * r0, None, eviction_policy='evict_last')
tmp508 = tl.load(in_ptr22 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp511 = tl.load(in_ptr22 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp514 = tl.load(in_ptr22 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp532 = tl.load(in_ptr23 + 4 * r0, None, eviction_policy='evict_last')
tmp534 = tl.load(in_ptr23 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp537 = tl.load(in_ptr23 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp540 = tl.load(in_ptr23 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp558 = tl.load(in_ptr24 + 4 * r0, None, eviction_policy='evict_last')
tmp559 = tl.load(in_ptr25 + 4 * r0, None, eviction_policy='evict_last')
tmp561 = tl.load(in_ptr25 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp564 = tl.load(in_ptr25 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp567 = tl.load(in_ptr25 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp573 = tl.load(in_ptr24 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp577 = tl.load(in_ptr24 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp581 = tl.load(in_ptr24 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp588 = tl.load(in_ptr26 + 4 * r0, None, eviction_policy='evict_last')
tmp590 = tl.load(in_ptr26 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp593 = tl.load(in_ptr26 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp596 = tl.load(in_ptr26 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp614 = tl.load(in_ptr27 + 4 * r0, None, eviction_policy='evict_last')
tmp616 = tl.load(in_ptr27 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp619 = tl.load(in_ptr27 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp622 = tl.load(in_ptr27 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp640 = tl.load(in_ptr28 + 4 * r0, None, eviction_policy='evict_last')
tmp642 = tl.load(in_ptr28 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp645 = tl.load(in_ptr28 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp648 = tl.load(in_ptr28 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp666 = tl.load(in_ptr29 + 4 * r0, None, eviction_policy='evict_last')
tmp668 = tl.load(in_ptr29 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp671 = tl.load(in_ptr29 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp674 = tl.load(in_ptr29 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp692 = tl.load(in_ptr30 + 4 * r0, None, eviction_policy='evict_last')
tmp694 = tl.load(in_ptr30 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp697 = tl.load(in_ptr30 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp700 = tl.load(in_ptr30 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp718 = tl.load(in_ptr31 + 4 * r0, None, eviction_policy='evict_last')
tmp720 = tl.load(in_ptr31 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp723 = tl.load(in_ptr31 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp726 = tl.load(in_ptr31 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp31 = tl_math.exp(tmp30)
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tl_math.log(tmp40)
tmp42 = tmp30 - tmp41
tmp43 = tmp0 * tmp42
tmp44 = tmp32 - tmp41
tmp45 = tmp15 * tmp44
tmp46 = tmp43 + tmp45
tmp47 = tmp35 - tmp41
tmp48 = tmp19 * tmp47
tmp49 = tmp46 + tmp48
tmp50 = tmp38 - tmp41
tmp51 = tmp23 * tmp50
tmp52 = tmp49 + tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = tl.sum(tmp53, 1)[:, None]
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp56 - tmp67
tmp69 = tmp0 * tmp68
tmp70 = tmp58 - tmp67
tmp71 = tmp15 * tmp70
tmp72 = tmp69 + tmp71
tmp73 = tmp61 - tmp67
tmp74 = tmp19 * tmp73
tmp75 = tmp72 + tmp74
tmp76 = tmp64 - tmp67
tmp77 = tmp23 * tmp76
tmp78 = tmp75 + tmp77
tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK])
tmp81 = tl.sum(tmp79, 1)[:, None]
tmp83 = tl_math.exp(tmp82)
tmp85 = tl_math.exp(tmp84)
tmp86 = tmp83 + tmp85
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp86 + tmp88
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tl_math.log(tmp92)
tmp94 = tmp82 - tmp93
tmp95 = tmp0 * tmp94
tmp96 = tmp84 - tmp93
tmp97 = tmp15 * tmp96
tmp98 = tmp95 + tmp97
tmp99 = tmp87 - tmp93
tmp100 = tmp19 * tmp99
tmp101 = tmp98 + tmp100
tmp102 = tmp90 - tmp93
tmp103 = tmp23 * tmp102
tmp104 = tmp101 + tmp103
tmp105 = tl.broadcast_to(tmp104, [XBLOCK, RBLOCK])
tmp107 = tl.sum(tmp105, 1)[:, None]
tmp109 = tl_math.exp(tmp108)
tmp111 = tl_math.exp(tmp110)
tmp112 = tmp109 + tmp111
tmp114 = tl_math.exp(tmp113)
tmp115 = tmp112 + tmp114
tmp117 = tl_math.exp(tmp116)
tmp118 = tmp115 + tmp117
tmp119 = tl_math.log(tmp118)
tmp120 = tmp108 - tmp119
tmp121 = tmp0 * tmp120
tmp122 = tmp110 - tmp119
tmp123 = tmp15 * tmp122
tmp124 = tmp121 + tmp123
tmp125 = tmp113 - tmp119
tmp126 = tmp19 * tmp125
tmp127 = tmp124 + tmp126
tmp128 = tmp116 - tmp119
tmp129 = tmp23 * tmp128
tmp130 = tmp127 + tmp129
tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp133 = tl.sum(tmp131, 1)[:, None]
tmp135 = tl_math.exp(tmp134)
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tmp143 = tl_math.exp(tmp142)
tmp144 = tmp141 + tmp143
tmp145 = tl_math.log(tmp144)
tmp146 = tmp134 - tmp145
tmp147 = tmp0 * tmp146
tmp148 = tmp136 - tmp145
tmp149 = tmp15 * tmp148
tmp150 = tmp147 + tmp149
tmp151 = tmp139 - tmp145
tmp152 = tmp19 * tmp151
tmp153 = tmp150 + tmp152
tmp154 = tmp142 - tmp145
tmp155 = tmp23 * tmp154
tmp156 = tmp153 + tmp155
tmp157 = tl.broadcast_to(tmp156, [XBLOCK, RBLOCK])
tmp159 = tl.sum(tmp157, 1)[:, None]
tmp161 = tl_math.exp(tmp160)
tmp163 = tl_math.exp(tmp162)
tmp164 = tmp161 + tmp163
tmp166 = tl_math.exp(tmp165)
tmp167 = tmp164 + tmp166
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp167 + tmp169
tmp171 = tl_math.log(tmp170)
tmp172 = tmp160 - tmp171
tmp173 = tmp0 * tmp172
tmp174 = tmp162 - tmp171
tmp175 = tmp15 * tmp174
tmp176 = tmp173 + tmp175
tmp177 = tmp165 - tmp171
tmp178 = tmp19 * tmp177
tmp179 = tmp176 + tmp178
tmp180 = tmp168 - tmp171
tmp181 = tmp23 * tmp180
tmp182 = tmp179 + tmp181
tmp183 = tl.broadcast_to(tmp182, [XBLOCK, RBLOCK])
tmp185 = tl.sum(tmp183, 1)[:, None]
tmp188 = tl_math.exp(tmp187)
tmp190 = tl_math.exp(tmp189)
tmp191 = tmp188 + tmp190
tmp193 = tl_math.exp(tmp192)
tmp194 = tmp191 + tmp193
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp194 + tmp196
tmp198 = tl_math.log(tmp197)
tmp199 = tmp187 - tmp198
tmp200 = tmp186 * tmp199
tmp202 = tmp189 - tmp198
tmp203 = tmp201 * tmp202
tmp204 = tmp200 + tmp203
tmp206 = tmp192 - tmp198
tmp207 = tmp205 * tmp206
tmp208 = tmp204 + tmp207
tmp210 = tmp195 - tmp198
tmp211 = tmp209 * tmp210
tmp212 = tmp208 + tmp211
tmp213 = tl.broadcast_to(tmp212, [XBLOCK, RBLOCK])
tmp215 = tl.sum(tmp213, 1)[:, None]
tmp217 = tl_math.exp(tmp216)
tmp219 = tl_math.exp(tmp218)
tmp220 = tmp217 + tmp219
tmp222 = tl_math.exp(tmp221)
tmp223 = tmp220 + tmp222
tmp225 = tl_math.exp(tmp224)
tmp226 = tmp223 + tmp225
tmp227 = tl_math.log(tmp226)
tmp228 = tmp216 - tmp227
tmp229 = tmp186 * tmp228
tmp230 = tmp218 - tmp227
tmp231 = tmp201 * tmp230
tmp232 = tmp229 + tmp231
tmp233 = tmp221 - tmp227
tmp234 = tmp205 * tmp233
tmp235 = tmp232 + tmp234
tmp236 = tmp224 - tmp227
tmp237 = tmp209 * tmp236
tmp238 = tmp235 + tmp237
tmp239 = tl.broadcast_to(tmp238, [XBLOCK, RBLOCK])
tmp241 = tl.sum(tmp239, 1)[:, None]
tmp243 = tl_math.exp(tmp242)
tmp245 = tl_math.exp(tmp244)
tmp246 = tmp243 + tmp245
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp246 + tmp248
tmp251 = tl_math.exp(tmp250)
tmp252 = tmp249 + tmp251
tmp253 = tl_math.log(tmp252)
tmp254 = tmp242 - tmp253
tmp255 = tmp186 * tmp254
tmp256 = tmp244 - tmp253
tmp257 = tmp201 * tmp256
tmp258 = tmp255 + tmp257
tmp259 = tmp247 - tmp253
tmp260 = tmp205 * tmp259
tmp261 = tmp258 + tmp260
tmp262 = tmp250 - tmp253
tmp263 = tmp209 * tmp262
tmp264 = tmp261 + tmp263
tmp265 = tl.broadcast_to(tmp264, [XBLOCK, RBLOCK])
tmp267 = tl.sum(tmp265, 1)[:, None]
tmp269 = tl_math.exp(tmp268)
tmp271 = tl_math.exp(tmp270)
tmp272 = tmp269 + tmp271
tmp274 = tl_math.exp(tmp273)
tmp275 = tmp272 + tmp274
tmp277 = tl_math.exp(tmp276)
tmp278 = tmp275 + tmp277
tmp279 = tl_math.log(tmp278)
tmp280 = tmp268 - tmp279
tmp281 = tmp186 * tmp280
tmp282 = tmp270 - tmp279
tmp283 = tmp201 * tmp282
tmp284 = tmp281 + tmp283
tmp285 = tmp273 - tmp279
tmp286 = tmp205 * tmp285
tmp287 = tmp284 + tmp286
tmp288 = tmp276 - tmp279
tmp289 = tmp209 * tmp288
tmp290 = tmp287 + tmp289
tmp291 = tl.broadcast_to(tmp290, [XBLOCK, RBLOCK])
tmp293 = tl.sum(tmp291, 1)[:, None]
tmp295 = tl_math.exp(tmp294)
tmp297 = tl_math.exp(tmp296)
tmp298 = tmp295 + tmp297
tmp300 = tl_math.exp(tmp299)
tmp301 = tmp298 + tmp300
tmp303 = tl_math.exp(tmp302)
tmp304 = tmp301 + tmp303
tmp305 = tl_math.log(tmp304)
tmp306 = tmp294 - tmp305
tmp307 = tmp186 * tmp306
tmp308 = tmp296 - tmp305
tmp309 = tmp201 * tmp308
tmp310 = tmp307 + tmp309
tmp311 = tmp299 - tmp305
tmp312 = tmp205 * tmp311
tmp313 = tmp310 + tmp312
tmp314 = tmp302 - tmp305
tmp315 = tmp209 * tmp314
tmp316 = tmp313 + tmp315
tmp317 = tl.broadcast_to(tmp316, [XBLOCK, RBLOCK])
tmp319 = tl.sum(tmp317, 1)[:, None]
tmp321 = tl_math.exp(tmp320)
tmp323 = tl_math.exp(tmp322)
tmp324 = tmp321 + tmp323
tmp326 = tl_math.exp(tmp325)
tmp327 = tmp324 + tmp326
tmp329 = tl_math.exp(tmp328)
tmp330 = tmp327 + tmp329
tmp331 = tl_math.log(tmp330)
tmp332 = tmp320 - tmp331
tmp333 = tmp186 * tmp332
tmp334 = tmp322 - tmp331
tmp335 = tmp201 * tmp334
tmp336 = tmp333 + tmp335
tmp337 = tmp325 - tmp331
tmp338 = tmp205 * tmp337
tmp339 = tmp336 + tmp338
tmp340 = tmp328 - tmp331
tmp341 = tmp209 * tmp340
tmp342 = tmp339 + tmp341
tmp343 = tl.broadcast_to(tmp342, [XBLOCK, RBLOCK])
tmp345 = tl.sum(tmp343, 1)[:, None]
tmp347 = tl_math.exp(tmp346)
tmp349 = tl_math.exp(tmp348)
tmp350 = tmp347 + tmp349
tmp352 = tl_math.exp(tmp351)
tmp353 = tmp350 + tmp352
tmp355 = tl_math.exp(tmp354)
tmp356 = tmp353 + tmp355
tmp357 = tl_math.log(tmp356)
tmp358 = tmp346 - tmp357
tmp359 = tmp186 * tmp358
tmp360 = tmp348 - tmp357
tmp361 = tmp201 * tmp360
tmp362 = tmp359 + tmp361
tmp363 = tmp351 - tmp357
tmp364 = tmp205 * tmp363
tmp365 = tmp362 + tmp364
tmp366 = tmp354 - tmp357
tmp367 = tmp209 * tmp366
tmp368 = tmp365 + tmp367
tmp369 = tl.broadcast_to(tmp368, [XBLOCK, RBLOCK])
tmp371 = tl.sum(tmp369, 1)[:, None]
tmp374 = tl_math.exp(tmp373)
tmp376 = tl_math.exp(tmp375)
tmp377 = tmp374 + tmp376
tmp379 = tl_math.exp(tmp378)
tmp380 = tmp377 + tmp379
tmp382 = tl_math.exp(tmp381)
tmp383 = tmp380 + tmp382
tmp384 = tl_math.log(tmp383)
tmp385 = tmp373 - tmp384
tmp386 = tmp372 * tmp385
tmp388 = tmp375 - tmp384
tmp389 = tmp387 * tmp388
tmp390 = tmp386 + tmp389
tmp392 = tmp378 - tmp384
tmp393 = tmp391 * tmp392
tmp394 = tmp390 + tmp393
tmp396 = tmp381 - tmp384
tmp397 = tmp395 * tmp396
tmp398 = tmp394 + tmp397
tmp399 = tl.broadcast_to(tmp398, [XBLOCK, RBLOCK])
tmp401 = tl.sum(tmp399, 1)[:, None]
tmp403 = tl_math.exp(tmp402)
tmp405 = tl_math.exp(tmp404)
tmp406 = tmp403 + tmp405
tmp408 = tl_math.exp(tmp407)
tmp409 = tmp406 + tmp408
tmp411 = tl_math.exp(tmp410)
tmp412 = tmp409 + tmp411
tmp413 = tl_math.log(tmp412)
tmp414 = tmp402 - tmp413
tmp415 = tmp372 * tmp414
tmp416 = tmp404 - tmp413
tmp417 = tmp387 * tmp416
tmp418 = tmp415 + tmp417
tmp419 = tmp407 - tmp413
tmp420 = tmp391 * tmp419
tmp421 = tmp418 + tmp420
tmp422 = tmp410 - tmp413
tmp423 = tmp395 * tmp422
tmp424 = tmp421 + tmp423
tmp425 = tl.broadcast_to(tmp424, [XBLOCK, RBLOCK])
tmp427 = tl.sum(tmp425, 1)[:, None]
tmp429 = tl_math.exp(tmp428)
tmp431 = tl_math.exp(tmp430)
tmp432 = tmp429 + tmp431
tmp434 = tl_math.exp(tmp433)
tmp435 = tmp432 + tmp434
tmp437 = tl_math.exp(tmp436)
tmp438 = tmp435 + tmp437
tmp439 = tl_math.log(tmp438)
tmp440 = tmp428 - tmp439
tmp441 = tmp372 * tmp440
tmp442 = tmp430 - tmp439
tmp443 = tmp387 * tmp442
tmp444 = tmp441 + tmp443
tmp445 = tmp433 - tmp439
tmp446 = tmp391 * tmp445
tmp447 = tmp444 + tmp446
tmp448 = tmp436 - tmp439
tmp449 = tmp395 * tmp448
tmp450 = tmp447 + tmp449
tmp451 = tl.broadcast_to(tmp450, [XBLOCK, RBLOCK])
tmp453 = tl.sum(tmp451, 1)[:, None]
tmp455 = tl_math.exp(tmp454)
tmp457 = tl_math.exp(tmp456)
tmp458 = tmp455 + tmp457
tmp460 = tl_math.exp(tmp459)
tmp461 = tmp458 + tmp460
tmp463 = tl_math.exp(tmp462)
tmp464 = tmp461 + tmp463
tmp465 = tl_math.log(tmp464)
tmp466 = tmp454 - tmp465
tmp467 = tmp372 * tmp466
tmp468 = tmp456 - tmp465
tmp469 = tmp387 * tmp468
tmp470 = tmp467 + tmp469
tmp471 = tmp459 - tmp465
tmp472 = tmp391 * tmp471
tmp473 = tmp470 + tmp472
tmp474 = tmp462 - tmp465
tmp475 = tmp395 * tmp474
tmp476 = tmp473 + tmp475
tmp477 = tl.broadcast_to(tmp476, [XBLOCK, RBLOCK])
tmp479 = tl.sum(tmp477, 1)[:, None]
tmp481 = tl_math.exp(tmp480)
tmp483 = tl_math.exp(tmp482)
tmp484 = tmp481 + tmp483
tmp486 = tl_math.exp(tmp485)
tmp487 = tmp484 + tmp486
tmp489 = tl_math.exp(tmp488)
tmp490 = tmp487 + tmp489
tmp491 = tl_math.log(tmp490)
tmp492 = tmp480 - tmp491
tmp493 = tmp372 * tmp492
tmp494 = tmp482 - tmp491
tmp495 = tmp387 * tmp494
tmp496 = tmp493 + tmp495
tmp497 = tmp485 - tmp491
tmp498 = tmp391 * tmp497
tmp499 = tmp496 + tmp498
tmp500 = tmp488 - tmp491
tmp501 = tmp395 * tmp500
tmp502 = tmp499 + tmp501
tmp503 = tl.broadcast_to(tmp502, [XBLOCK, RBLOCK])
tmp505 = tl.sum(tmp503, 1)[:, None]
tmp507 = tl_math.exp(tmp506)
tmp509 = tl_math.exp(tmp508)
tmp510 = tmp507 + tmp509
tmp512 = tl_math.exp(tmp511)
tmp513 = tmp510 + tmp512
tmp515 = tl_math.exp(tmp514)
tmp516 = tmp513 + tmp515
tmp517 = tl_math.log(tmp516)
tmp518 = tmp506 - tmp517
tmp519 = tmp372 * tmp518
tmp520 = tmp508 - tmp517
tmp521 = tmp387 * tmp520
tmp522 = tmp519 + tmp521
tmp523 = tmp511 - tmp517
tmp524 = tmp391 * tmp523
tmp525 = tmp522 + tmp524
tmp526 = tmp514 - tmp517
tmp527 = tmp395 * tmp526
tmp528 = tmp525 + tmp527
tmp529 = tl.broadcast_to(tmp528, [XBLOCK, RBLOCK])
tmp531 = tl.sum(tmp529, 1)[:, None]
tmp533 = tl_math.exp(tmp532)
tmp535 = tl_math.exp(tmp534)
tmp536 = tmp533 + tmp535
tmp538 = tl_math.exp(tmp537)
tmp539 = tmp536 + tmp538
tmp541 = tl_math.exp(tmp540)
tmp542 = tmp539 + tmp541
tmp543 = tl_math.log(tmp542)
tmp544 = tmp532 - tmp543
tmp545 = tmp372 * tmp544
tmp546 = tmp534 - tmp543
tmp547 = tmp387 * tmp546
tmp548 = tmp545 + tmp547
tmp549 = tmp537 - tmp543
tmp550 = tmp391 * tmp549
tmp551 = tmp548 + tmp550
tmp552 = tmp540 - tmp543
tmp553 = tmp395 * tmp552
tmp554 = tmp551 + tmp553
tmp555 = tl.broadcast_to(tmp554, [XBLOCK, RBLOCK])
tmp557 = tl.sum(tmp555, 1)[:, None]
tmp560 = tl_math.exp(tmp559)
tmp562 = tl_math.exp(tmp561)
tmp563 = tmp560 + tmp562
tmp565 = tl_math.exp(tmp564)
tmp566 = tmp563 + tmp565
tmp568 = tl_math.exp(tmp567)
tmp569 = tmp566 + tmp568
tmp570 = tl_math.log(tmp569)
tmp571 = tmp559 - tmp570
tmp572 = tmp558 * tmp571
tmp574 = tmp561 - tmp570
tmp575 = tmp573 * tmp574
tmp576 = tmp572 + tmp575
tmp578 = tmp564 - tmp570
tmp579 = tmp577 * tmp578
tmp580 = tmp576 + tmp579
tmp582 = tmp567 - tmp570
tmp583 = tmp581 * tmp582
tmp584 = tmp580 + tmp583
tmp585 = tl.broadcast_to(tmp584, [XBLOCK, RBLOCK])
tmp587 = tl.sum(tmp585, 1)[:, None]
tmp589 = tl_math.exp(tmp588)
tmp591 = tl_math.exp(tmp590)
tmp592 = tmp589 + tmp591
tmp594 = tl_math.exp(tmp593)
tmp595 = tmp592 + tmp594
tmp597 = tl_math.exp(tmp596)
tmp598 = tmp595 + tmp597
tmp599 = tl_math.log(tmp598)
tmp600 = tmp588 - tmp599
tmp601 = tmp558 * tmp600
tmp602 = tmp590 - tmp599
tmp603 = tmp573 * tmp602
tmp604 = tmp601 + tmp603
tmp605 = tmp593 - tmp599
tmp606 = tmp577 * tmp605
tmp607 = tmp604 + tmp606
tmp608 = tmp596 - tmp599
tmp609 = tmp581 * tmp608
tmp610 = tmp607 + tmp609
tmp611 = tl.broadcast_to(tmp610, [XBLOCK, RBLOCK])
tmp613 = tl.sum(tmp611, 1)[:, None]
tmp615 = tl_math.exp(tmp614)
tmp617 = tl_math.exp(tmp616)
tmp618 = tmp615 + tmp617
tmp620 = tl_math.exp(tmp619)
tmp621 = tmp618 + tmp620
tmp623 = tl_math.exp(tmp622)
tmp624 = tmp621 + tmp623
tmp625 = tl_math.log(tmp624)
tmp626 = tmp614 - tmp625
tmp627 = tmp558 * tmp626
tmp628 = tmp616 - tmp625
tmp629 = tmp573 * tmp628
tmp630 = tmp627 + tmp629
tmp631 = tmp619 - tmp625
tmp632 = tmp577 * tmp631
tmp633 = tmp630 + tmp632
tmp634 = tmp622 - tmp625
tmp635 = tmp581 * tmp634
tmp636 = tmp633 + tmp635
tmp637 = tl.broadcast_to(tmp636, [XBLOCK, RBLOCK])
tmp639 = tl.sum(tmp637, 1)[:, None]
tmp641 = tl_math.exp(tmp640)
tmp643 = tl_math.exp(tmp642)
tmp644 = tmp641 + tmp643
tmp646 = tl_math.exp(tmp645)
tmp647 = tmp644 + tmp646
tmp649 = tl_math.exp(tmp648)
tmp650 = tmp647 + tmp649
tmp651 = tl_math.log(tmp650)
tmp652 = tmp640 - tmp651
tmp653 = tmp558 * tmp652
tmp654 = tmp642 - tmp651
tmp655 = tmp573 * tmp654
tmp656 = tmp653 + tmp655
tmp657 = tmp645 - tmp651
tmp658 = tmp577 * tmp657
tmp659 = tmp656 + tmp658
tmp660 = tmp648 - tmp651
tmp661 = tmp581 * tmp660
tmp662 = tmp659 + tmp661
tmp663 = tl.broadcast_to(tmp662, [XBLOCK, RBLOCK])
tmp665 = tl.sum(tmp663, 1)[:, None]
tmp667 = tl_math.exp(tmp666)
tmp669 = tl_math.exp(tmp668)
tmp670 = tmp667 + tmp669
tmp672 = tl_math.exp(tmp671)
tmp673 = tmp670 + tmp672
tmp675 = tl_math.exp(tmp674)
tmp676 = tmp673 + tmp675
tmp677 = tl_math.log(tmp676)
tmp678 = tmp666 - tmp677
tmp679 = tmp558 * tmp678
tmp680 = tmp668 - tmp677
tmp681 = tmp573 * tmp680
tmp682 = tmp679 + tmp681
tmp683 = tmp671 - tmp677
tmp684 = tmp577 * tmp683
tmp685 = tmp682 + tmp684
tmp686 = tmp674 - tmp677
tmp687 = tmp581 * tmp686
tmp688 = tmp685 + tmp687
tmp689 = tl.broadcast_to(tmp688, [XBLOCK, RBLOCK])
tmp691 = tl.sum(tmp689, 1)[:, None]
tmp693 = tl_math.exp(tmp692)
tmp695 = tl_math.exp(tmp694)
tmp696 = tmp693 + tmp695
tmp698 = tl_math.exp(tmp697)
tmp699 = tmp696 + tmp698
tmp701 = tl_math.exp(tmp700)
tmp702 = tmp699 + tmp701
tmp703 = tl_math.log(tmp702)
tmp704 = tmp692 - tmp703
tmp705 = tmp558 * tmp704
tmp706 = tmp694 - tmp703
tmp707 = tmp573 * tmp706
tmp708 = tmp705 + tmp707
tmp709 = tmp697 - tmp703
tmp710 = tmp577 * tmp709
tmp711 = tmp708 + tmp710
tmp712 = tmp700 - tmp703
tmp713 = tmp581 * tmp712
tmp714 = tmp711 + tmp713
tmp715 = tl.broadcast_to(tmp714, [XBLOCK, RBLOCK])
tmp717 = tl.sum(tmp715, 1)[:, None]
tmp719 = tl_math.exp(tmp718)
tmp721 = tl_math.exp(tmp720)
tmp722 = tmp719 + tmp721
tmp724 = tl_math.exp(tmp723)
tmp725 = tmp722 + tmp724
tmp727 = tl_math.exp(tmp726)
tmp728 = tmp725 + tmp727
tmp729 = tl_math.log(tmp728)
tmp730 = tmp718 - tmp729
tmp731 = tmp558 * tmp730
tmp732 = tmp720 - tmp729
tmp733 = tmp573 * tmp732
tmp734 = tmp731 + tmp733
tmp735 = tmp723 - tmp729
tmp736 = tmp577 * tmp735
tmp737 = tmp734 + tmp736
tmp738 = tmp726 - tmp729
tmp739 = tmp581 * tmp738
tmp740 = tmp737 + tmp739
tmp741 = tl.broadcast_to(tmp740, [XBLOCK, RBLOCK])
tmp743 = tl.sum(tmp741, 1)[:, None]
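    # Epilogue: each tmpNNN / 4.0 below is the batch mean (N = 4) of one
    # per-crop cross-entropy sum; the means are negated, accumulated per code
    # set, scaled by 0.142857... = 1/(n_crops - 1) = 1/7, summed, and finally
    # scaled by 0.25 = 1/len(high_resolution_outputs), matching
    # SwaVLoss.forward below.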
tmp744 = 4.0
tmp745 = tmp587 / tmp744
tmp746 = -tmp745
tmp747 = 0.0
tmp748 = tmp746 + tmp747
tmp749 = tmp613 / tmp744
tmp750 = -tmp749
tmp751 = tmp748 + tmp750
tmp752 = tmp639 / tmp744
tmp753 = -tmp752
tmp754 = tmp751 + tmp753
tmp755 = tmp665 / tmp744
tmp756 = -tmp755
tmp757 = tmp754 + tmp756
tmp758 = tmp691 / tmp744
tmp759 = -tmp758
tmp760 = tmp757 + tmp759
tmp761 = tmp717 / tmp744
tmp762 = -tmp761
tmp763 = tmp760 + tmp762
tmp764 = tmp743 / tmp744
tmp765 = -tmp764
tmp766 = tmp763 + tmp765
tmp767 = 0.14285714285714285
tmp768 = tmp766 * tmp767
tmp769 = tmp401 / tmp744
tmp770 = -tmp769
tmp771 = tmp770 + tmp747
tmp772 = tmp427 / tmp744
tmp773 = -tmp772
tmp774 = tmp771 + tmp773
tmp775 = tmp453 / tmp744
tmp776 = -tmp775
tmp777 = tmp774 + tmp776
tmp778 = tmp479 / tmp744
tmp779 = -tmp778
tmp780 = tmp777 + tmp779
tmp781 = tmp505 / tmp744
tmp782 = -tmp781
tmp783 = tmp780 + tmp782
tmp784 = tmp531 / tmp744
tmp785 = -tmp784
tmp786 = tmp783 + tmp785
tmp787 = tmp557 / tmp744
tmp788 = -tmp787
tmp789 = tmp786 + tmp788
tmp790 = tmp789 * tmp767
tmp791 = tmp215 / tmp744
tmp792 = -tmp791
tmp793 = tmp792 + tmp747
tmp794 = tmp241 / tmp744
tmp795 = -tmp794
tmp796 = tmp793 + tmp795
tmp797 = tmp267 / tmp744
tmp798 = -tmp797
tmp799 = tmp796 + tmp798
tmp800 = tmp293 / tmp744
tmp801 = -tmp800
tmp802 = tmp799 + tmp801
tmp803 = tmp319 / tmp744
tmp804 = -tmp803
tmp805 = tmp802 + tmp804
tmp806 = tmp345 / tmp744
tmp807 = -tmp806
tmp808 = tmp805 + tmp807
tmp809 = tmp371 / tmp744
tmp810 = -tmp809
tmp811 = tmp808 + tmp810
tmp812 = tmp811 * tmp767
tmp813 = tmp29 / tmp744
tmp814 = -tmp813
tmp815 = tmp814 + tmp747
tmp816 = tmp55 / tmp744
tmp817 = -tmp816
tmp818 = tmp815 + tmp817
tmp819 = tmp81 / tmp744
tmp820 = -tmp819
tmp821 = tmp818 + tmp820
tmp822 = tmp107 / tmp744
tmp823 = -tmp822
tmp824 = tmp821 + tmp823
tmp825 = tmp133 / tmp744
tmp826 = -tmp825
tmp827 = tmp824 + tmp826
tmp828 = tmp159 / tmp744
tmp829 = -tmp828
tmp830 = tmp827 + tmp829
tmp831 = tmp185 / tmp744
tmp832 = -tmp831
tmp833 = tmp830 + tmp832
tmp834 = tmp833 * tmp767
tmp835 = tmp768 + tmp747
tmp836 = tmp835 + tmp790
tmp837 = tmp836 + tmp812
tmp838 = tmp837 + tmp834
tmp839 = 0.25
tmp840 = tmp838 * tmp839
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp840, None)
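# Host-side launcher: allocates the intermediate buffers and dispatches the
# kernels above. The triton_per/poi_fused_sum_* and triton_poi_fused_div_*
# launches implement the sinkhorn normalization iterations; the final fused
# kernel accumulates the cross-entropy terms into a scalar loss.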
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_1[grid(4)](arg0_1, buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(4)](arg0_1, buf0, buf1, buf2, 4, XBLOCK
=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_3[grid(4)](arg0_1, buf0, buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_4[grid(16)](arg0_1, buf0, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_div_6[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_mul_7[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = buf0
del buf0
buf56 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
buf79 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_sum_8[grid(1)](arg0_1, buf23, buf56, buf79, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf24 = buf3
del buf3
triton_poi_fused_sum_9[grid(4)](arg0_1, buf23, buf24, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf25 = buf2
del buf2
triton_poi_fused_sum_10[grid(4)](arg0_1, buf23, buf24, buf25, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf26 = buf1
del buf1
triton_poi_fused_sum_11[grid(4)](arg0_1, buf23, buf24, buf25, buf26,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_12[grid(16)](arg0_1, buf23, buf24, buf25,
buf26, buf8, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf46 = buf23
del buf23
buf81 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_sum_13[grid(1)](arg0_1, buf46, buf81, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf47 = buf26
del buf26
triton_poi_fused_sum_14[grid(4)](arg0_1, buf46, buf47, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf48 = buf25
del buf25
triton_poi_fused_sum_15[grid(4)](arg0_1, buf46, buf47, buf48, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf49 = buf24
del buf24
triton_poi_fused_sum_16[grid(4)](arg0_1, buf46, buf47, buf48, buf49,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf50 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_17[grid(16)](arg0_1, buf46, buf47, buf48,
buf49, buf10, buf33, buf50, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf69 = buf46
del buf46
triton_per_fused_sum_18[grid(1)](arg0_1, buf69, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf70 = buf49
del buf49
triton_poi_fused_sum_19[grid(4)](arg0_1, buf69, buf70, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf71 = buf48
del buf48
triton_poi_fused_sum_20[grid(4)](arg0_1, buf69, buf70, buf71, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf72 = buf47
del buf47
triton_poi_fused_sum_21[grid(4)](arg0_1, buf69, buf70, buf71, buf72,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf35 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf58 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf73 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_22[grid(16)](arg0_1, buf69, buf70, buf71,
buf72, buf12, buf35, buf58, buf73, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf70
del buf71
del buf72
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf60 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_23[grid(16)](arg1_1, buf14, buf37, buf60, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf62 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_24[grid(16)](arg1_1, buf16, buf39, buf62, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf64 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_25[grid(16)](arg1_1, buf18, buf41, buf64, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf66 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_26[grid(16)](arg1_1, buf20, buf43, buf66, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf27, buf28, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf29 = buf27
del buf27
triton_poi_fused_div_6[grid(16)](buf28, buf29, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf30 = buf28
del buf28
triton_poi_fused_mul_7[grid(16)](buf29, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf31 = reinterpret_tensor(buf29, (4, 4), (4, 1), 0)
del buf29
buf54 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf77 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_23[grid(16)](arg0_1, buf31, buf54, buf77, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf51 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf50, buf51, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf52 = buf50
del buf50
triton_poi_fused_div_6[grid(16)](buf51, buf52, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf53 = buf51
del buf51
triton_poi_fused_mul_7[grid(16)](buf52, buf53, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf74 = buf52
del buf52
triton_poi_fused_div_5[grid(16)](buf73, buf74, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf75 = buf73
del buf73
triton_poi_fused_div_6[grid(16)](buf74, buf75, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf76 = buf74
del buf74
triton_poi_fused_mul_7[grid(16)](buf75, buf76, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf83 = reinterpret_tensor(buf75, (4, 4), (4, 1), 0)
del buf75
triton_poi_fused_27[grid(16)](arg1_1, buf83, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf85 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_28[grid(16)](arg1_1, buf85, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf87 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_29[grid(16)](arg1_1, buf87, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf89 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_30[grid(16)](arg1_1, buf89, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg1_1
buf11 = buf69
del buf69
buf22 = buf11
del buf11
buf92 = buf22
del buf22
triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31[grid(1)](
buf92, buf76, buf77, buf79, buf81, buf83, buf85, buf87, buf89,
buf53, buf54, buf56, buf58, buf60, buf62, buf64, buf66, buf30,
buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf7, buf8,
buf10, buf12, buf14, buf16, buf18, buf20, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf10
del buf12
del buf14
del buf16
del buf18
del buf20
del buf30
del buf31
del buf33
del buf35
del buf37
del buf39
del buf41
del buf43
del buf53
del buf54
del buf56
del buf58
del buf60
del buf62
del buf64
del buf66
del buf7
del buf76
del buf77
del buf79
del buf8
del buf81
del buf83
del buf85
del buf87
del buf89
return buf92,
@torch.no_grad()
def sinkhorn(out: 'torch.Tensor', iterations: 'int'=3, epsilon: 'float'=0.05):
"""Distributed sinkhorn algorithm.
As outlined in [0] and implemented in [1].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
[1]: https://github.com/facebookresearch/swav/
Args:
out:
Similarity of the features and the SwaV prototypes.
iterations:
Number of sinkhorn iterations.
epsilon:
Temperature parameter.
Returns:
Soft codes Q assigning each feature to a prototype.
"""
Q = torch.exp(out / epsilon).t()
sum_Q = torch.sum(Q)
Q /= sum_Q
B = Q.shape[1]  # batch size (number of samples)
K = Q.shape[0]  # number of prototypes
for _ in range(iterations):
# Normalize rows: each prototype receives total mass 1/K.
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
# Normalize columns: each sample receives total mass 1/B.
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B  # undo the final 1/B so each sample's code sums to 1
return Q.t()
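# A minimal usage sketch (illustrative only; names and shapes are assumptions):
#   logits = features @ prototypes.t()               # (batch, n_prototypes)
#   q = sinkhorn(logits, iterations=3, epsilon=0.05)
# q has the same shape as logits; each row is a soft assignment over the
# prototypes, and prototype usage is approximately balanced across the batch.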
class SwaVLossNew(nn.Module):
"""Implementation of the SwaV loss.
Attributes:
temperature:
Temperature parameter used for cross entropy calculations.
sinkhorn_iterations:
Number of iterations of the sinkhorn algorithm.
sinkhorn_epsilon:
Temperature parameter used in the sinkhorn algorithm.
"""
def __init__(self, temperature: 'float'=0.1, sinkhorn_iterations: 'int'
=3, sinkhorn_epsilon: 'float'=0.05):
super(SwaVLossNew, self).__init__()
self.temperature = temperature
self.sinkhorn_iterations = sinkhorn_iterations
self.sinkhorn_epsilon = sinkhorn_epsilon
def subloss(self, z: 'torch.Tensor', q: 'torch.Tensor'):
"""Calculates the cross entropy for the SwaV prediction problem.
Args:
z:
Similarity of the features and the SwaV prototypes.
q:
Codes obtained from Sinkhorn iterations.
Returns:
Cross entropy between predictions z and codes q.
"""
return -torch.mean(torch.sum(q * F.log_softmax(z / self.temperature,
dim=1), dim=1))
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
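# Note: the compiled graph is shape-specialized; call() asserts that both
# inputs are (4, 4, 4) tensors with contiguous strides, and the kernels run
# on CUDA device 0.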
| jianzhnie/self_supervised | SwaVLoss | false | 7,051 | ["Apache-2.0"] | 1 | d1e0f31ab032150ab0ad007c1e19773135a5fb79 | https://github.com/jianzhnie/self_supervised/tree/d1e0f31ab032150ab0ad007c1e19773135a5fb79 | import torch
import torch.nn.functional as F
from typing import List
import torch.nn as nn
@torch.no_grad()
def sinkhorn(out: 'torch.Tensor', iterations: 'int'=3, epsilon: 'float'=0.05):
"""Distributed sinkhorn algorithm.
As outlined in [0] and implemented in [1].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
[1]: https://github.com/facebookresearch/swav/
Args:
out:
Similarity of the features and the SwaV prototypes.
iterations:
Number of sinkhorn iterations.
epsilon:
Temperature parameter.
Returns:
Soft codes Q assigning each feature to a prototype.
"""
Q = torch.exp(out / epsilon).t()
sum_Q = torch.sum(Q)
Q /= sum_Q
B = Q.shape[1]  # batch size (number of samples)
K = Q.shape[0]  # number of prototypes
for _ in range(iterations):
# Normalize rows: each prototype receives total mass 1/K.
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
# Normalize columns: each sample receives total mass 1/B.
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B  # undo the final 1/B so each sample's code sums to 1
return Q.t()
class Model(nn.Module):
"""Implementation of the SwaV loss.
Attributes:
temperature:
Temperature parameter used for cross entropy calculations.
sinkhorn_iterations:
Number of iterations of the sinkhorn algorithm.
sinkhorn_epsilon:
Temperature parameter used in the sinkhorn algorithm.
"""
def __init__(self, temperature: 'float'=0.1, sinkhorn_iterations: 'int'
=3, sinkhorn_epsilon: 'float'=0.05):
super().__init__()
self.temperature = temperature
self.sinkhorn_iterations = sinkhorn_iterations
self.sinkhorn_epsilon = sinkhorn_epsilon
def subloss(self, z: 'torch.Tensor', q: 'torch.Tensor'):
"""Calculates the cross entropy for the SwaV prediction problem.
Args:
z:
Similarity of the features and the SwaV prototypes.
q:
Codes obtained from Sinkhorn iterations.
Returns:
Cross entropy between predictions z and codes q.
"""
return -torch.mean(torch.sum(q * F.log_softmax(z / self.temperature,
dim=1), dim=1))
def forward(self, high_resolution_outputs: 'List[torch.Tensor]',
low_resolution_outputs: 'List[torch.Tensor]'):
"""Computes the SwaV loss for a set of high and low resolution outputs.
Args:
high_resolution_outputs:
List of similarities of features and SwaV prototypes for the
high resolution crops.
low_resolution_outputs:
List of similarities of features and SwaV prototypes for the
low resolution crops.
Returns:
Swapping assignments between views loss (SwaV) as described in [0].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
"""
n_crops = len(high_resolution_outputs) + len(low_resolution_outputs)
loss = 0.0
for i in range(len(high_resolution_outputs)):
with torch.no_grad():
q = sinkhorn(high_resolution_outputs[i].detach(),
iterations=self.sinkhorn_iterations, epsilon=self.
sinkhorn_epsilon)
subloss = 0.0
for v in range(len(high_resolution_outputs)):
if v != i:
subloss += self.subloss(high_resolution_outputs[v], q)
for v in range(len(low_resolution_outputs)):
subloss += self.subloss(low_resolution_outputs[v], q)
loss += subloss / (n_crops - 1)
return loss / len(high_resolution_outputs)
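# Note: get_inputs() below passes plain (4, 4, 4) tensors rather than lists;
# iterating a tensor yields its first-dimension slices, so each argument is
# treated as four (4, 4) crop outputs.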
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
UNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xr/cxrgycnwn3a2engcpa6finswtqxdogftbffjavnh5ulttlgpbgyq.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
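# Note: the kernel above only adds the per-channel bias (in_ptr0, indexed by
# channel x1) to the convolution output in place; the convolution itself is
# typically dispatched as an extern kernel inside call().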
# kernel path: runs/run_shard_4/inductor_cache/xh/cxhcyt6y7iofxpx5fhkrcpiatyd6zclkxfs7hlxm2vw5njljdfsu.py
# Topologically Sorted Source Nodes: [batch_norm], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# batch_norm => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
triton_red_fused__native_batch_norm_legit_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[32, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 32
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = (xindex // 16)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + ((4096*x0) + (65536*(r2 // 4096)) + (131072*x1) + (r2 % 4096)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr1 + (x3), tmp3, xmask)
tl.store(out_ptr2 + (x3), tmp4, xmask)
''', device_str='cuda')
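# The reduction above is a single-pass Welford computation of per-channel
# mean and variance. An eager-mode sketch of the same statistics (assuming an
# input of shape (N, C, H, W)):
#   mean = x.mean(dim=(0, 2, 3))
#   var = x.var(dim=(0, 2, 3), unbiased=False)
# Here each program handles one (channel, split) pair; the partial statistics
# are merged by the next kernel.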
# kernel path: runs/run_shard_4/inductor_cache/bi/cbizslxpujifo4u7ps34pkxsqczjytzhkpvhv2bbkvtmujw7nydb.py
# Topologically Sorted Source Nodes: [batch_norm], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# batch_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (16*r1)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + (16*r1)), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tmp16 = 16384.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + (x0), tmp20, xmask)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp14, xmask)
''', device_str='cuda')
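# This kernel merges the two partial Welford accumulators per channel and
# computes rsqrt(var + 1e-05), where var = m2 / 16384 (= 4 * 64 * 64 elements
# per channel).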
# kernel path: runs/run_shard_4/inductor_cache/5h/c5h6ia34opuuxi62d6dkejio4tdeyvvyzxb3a3egfscqui5cqvbh.py
# Topologically Sorted Source Nodes: [batch_norm, x], Original ATen: [aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# batch_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# x => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_3 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16384.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
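# Fused pointwise epilogue: normalizes the convolution output as
# (x - mean) * rsqrt(var + eps), applies the affine weight/bias, and clamps
# with ReLU (max(0, .)) in a single pass.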
# kernel path: runs/run_shard_4/inductor_cache/qr/cqrddj2z5w457opl6srzuy4qd5rppfusuu4hz5zz5flksdkj34bs.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
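# 2x2 max pooling with stride 2: out_ptr0 receives the window maximum and
# out_ptr1 the int8 index (0-3) of the winning element, kept as the argmax
# indices required for the backward pass.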
# kernel path: runs/run_shard_4/inductor_cache/pp/cppvhuquwkhuq735cq65shrngl5uhg2xfzscker75jvnsp64cr5d.py
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fv/cfv64mhapxe6uvgmszrxj264kq3oyfslkxbp2ihqg54xs7cferth.py
# Topologically Sorted Source Nodes: [batch_norm_3], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# batch_norm_3 => add_6, rsqrt_3, var_mean_3
# Graph fragment:
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_3, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {})
triton_red_fused__native_batch_norm_legit_6 = async_compile.triton('triton_red_fused__native_batch_norm_legit_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[32, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex % 1024
r2 = (rindex // 1024)
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0) + (32768*r2)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hc/chcxzggkbatw3f5npmxdfchl2kqqvbtlxh2cdenizmpjcbw5m5oj.py
# Topologically Sorted Source Nodes: [batch_norm_3, x_4], Original ATen: [aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# batch_norm_3 => add_6, add_7, mul_6, mul_7, rsqrt_3, sub_3, var_mean_3
# x_4 => relu_3
# Graph fragment:
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_3, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_3, %getitem_9), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_3), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %unsqueeze_13), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_15), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_7,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_7 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6n/c6ndbntj2gw5jvp25obdm2vcnpwsvrxbz7zem2b5j3asm6h2y6xw.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_7 => getitem_14, getitem_15
# Graph fragment:
# %getitem_14 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_15 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c5/cc5ffwguve3fnom45q3qvw66t3terecg76iezoyk7gevmytsbk2a.py
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_14, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_9 = async_compile.triton('triton_poi_fused_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aa/caawzdy5yaezoju5wqnasg7o7yehk7sata37xfp66gzehdpweazv.py
# Topologically Sorted Source Nodes: [batch_norm_6], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# batch_norm_6 => add_12, rsqrt_6, var_mean_6
# Graph fragment:
# %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_6, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_16, 1e-05), kwargs = {})
# %rsqrt_6 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_12,), kwargs = {})
triton_per_fused__native_batch_norm_legit_10 = async_compile.triton('triton_per_fused__native_batch_norm_legit_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_10', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 64
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex % 256
r2 = (rindex // 256)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (256*x0) + (16384*r2)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))  # tmp1 already spans [RBLOCK]; the second broadcast was a no-op
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x0), tmp18, None)
tl.store(out_ptr0 + (x0), tmp8, None)
tl.store(out_ptr1 + (x0), tmp13, None)
''', device_str='cuda')
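# Reference sketch (hypothetical helper): the statistics this persistent
# reduction produces match torch.var_mean over (N, H, W) per channel, with one
# caveat -- out_ptr1 holds the raw sum of squared deviations, and the division
# by the element count (1024 = 4 images x 16x16 maps) is deferred to the
# consumer kernel below.
def _ref_bn_stats(x, eps=1e-5):
    # x: (4, 64, 16, 16) float32 (shape is an assumption from the index math)
    n = x.numel() // x.size(1)  # 1024 reduction elements per channel
    var, mean = torch.var_mean(x, dim=(0, 2, 3), correction=0, keepdim=True)
    return mean, var * n, torch.rsqrt(var + eps)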
# kernel path: runs/run_shard_4/inductor_cache/54/c54n6x3l2putw32m5h6kna3ut3tnlo6ljtp6uzkjbcsyq2gsnie2.py
# Topologically Sorted Source Nodes: [batch_norm_6, x_8], Original ATen: [aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# batch_norm_6 => add_12, add_13, mul_12, mul_13, rsqrt_6, sub_6, var_mean_6
# x_8 => relu_6
# Graph fragment:
# %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_6, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_16, 1e-05), kwargs = {})
# %rsqrt_6 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_12,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_6, %getitem_17), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %rsqrt_6), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_12, %unsqueeze_25), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_13, %unsqueeze_27), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_13,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_11 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
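# Reference sketch: the pointwise half of the batch norm. It recomputes inv_std
# from the per-channel sum of squared deviations written by the reduction kernel
# above (hence the division by 1024.0 in the body), applies the learned affine
# transform (in_ptr3/in_ptr4), and clamps at zero. All per-channel arguments are
# assumed broadcastable as (1, C, 1, 1).
def _ref_bn_relu(x, mean, sq_dev_sum, weight, bias, n=1024.0, eps=1e-5):
    inv_std = torch.rsqrt(sq_dev_sum / n + eps)
    return torch.relu((x - mean) * inv_std * weight + bias)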
# kernel path: runs/run_shard_4/inductor_cache/n3/cn3jjvgjxbnhybyzxmomcqd6hneh5mwum3axqgnzynd7xo3co3dw.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_11 => getitem_22, getitem_23
# Graph fragment:
# %getitem_22 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_23 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
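# Reference sketch: a 2x2, stride-2 max pool that also emits a row-major window
# code in {0, 1, 2, 3} -- the int8 tensor the backward pass scatters gradients
# through. torch.max breaks ties differently from the strict-greater chain in
# the kernel, so the codes can disagree on exact ties; the pooled values agree.
def _ref_maxpool2x2(x):
    # x: (N, C, H, W) with even H and W
    w = x.unfold(2, 2, 2).unfold(3, 2, 2)  # (N, C, H//2, W//2, 2, 2)
    vals, idx = w.flatten(-2).max(dim=-1)  # window elements in row-major order
    return vals, idx.to(torch.int8)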
# kernel path: runs/run_shard_4/inductor_cache/45/c45gx2vjeuqvj4uxxxzeh6awh7wi65zohiuwkycldrc73ioaugaf.py
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_9 => convolution_9
# Graph fragment:
# %convolution_9 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_22, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
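# Same bias-fold pattern as triton_poi_fused_convolution_9 above, here over the
# (4, 128, 8, 8) output of conv2d_9 (x1 selects one of 128 channels, 64 elements per map).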
# kernel path: runs/run_shard_4/inductor_cache/hv/chvvykkytz5z5bfp4byfx7fsp4oe3xcftvmksvvczsw5vgqvkqgc.py
# Topologically Sorted Source Nodes: [batch_norm_9], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# batch_norm_9 => add_18, rsqrt_9, var_mean_9
# Graph fragment:
# %var_mean_9 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_9, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_24, 1e-05), kwargs = {})
# %rsqrt_9 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_18,), kwargs = {})
triton_per_fused__native_batch_norm_legit_14 = async_compile.triton('triton_per_fused__native_batch_norm_legit_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_14', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_14(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex % 64
r2 = (rindex // 64)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0) + (8192*r2)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))  # tmp1 already spans [RBLOCK]; the second broadcast was a no-op
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x0), tmp18, None)
tl.store(out_ptr0 + (x0), tmp8, None)
tl.store(out_ptr1 + (x0), tmp13, None)
''', device_str='cuda')
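# Same per-channel stats reduction as triton_per_fused__native_batch_norm_legit_10,
# now over 128 channels with 256 reduction elements each (4 images x 8x8 maps).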
# kernel path: runs/run_shard_4/inductor_cache/x5/cx5e5g57gfsa62ajoempe5jgd7vf6dco5aipsqyzriwiyrpgitad.py
# Topologically Sorted Source Nodes: [batch_norm_9, x_12], Original ATen: [aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# batch_norm_9 => add_18, add_19, mul_18, mul_19, rsqrt_9, sub_9, var_mean_9
# x_12 => relu_9
# Graph fragment:
# %var_mean_9 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_9, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_24, 1e-05), kwargs = {})
# %rsqrt_9 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_18,), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_9, %getitem_25), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %rsqrt_9), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %unsqueeze_37), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %unsqueeze_39), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_19,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_15 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
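# Pointwise normalize + affine + ReLU as in kernel 11, with the variance divisor
# (256.0) matching this stage's reduction-element count per channel.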
# kernel path: runs/run_shard_4/inductor_cache/by/cby3qyk24hb4qgiwojv3km626rzzky2cst3j3yxa2kgbzlbuec75.py
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_15 => getitem_30, getitem_31
# Graph fragment:
# %getitem_30 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_31 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_16 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
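# Second 2x2, stride-2 pool: 8x8 maps down to 4x4; same value/argmax-code layout
# as triton_poi_fused_max_pool2d_with_indices_12.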
# kernel path: runs/run_shard_4/inductor_cache/6s/c6sltu66ocnfvnfj5vfexjewbvf3mlv2sgjt5bxcpxbpasonqntj.py
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# Graph fragment:
# %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_30, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_17 = async_compile.triton('triton_poi_fused_convolution_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
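# Bias fold for conv2d_12 over (4, 256, 4, 4); identical pattern to kernels 9 and 13.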
# kernel path: runs/run_shard_4/inductor_cache/7z/c7ztlblr7xqbsl6tq6t6kiubpv4m6uy77jsf5kmsrfcwpmmgxs4g.py
# Topologically Sorted Source Nodes: [batch_norm_12], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# batch_norm_12 => add_24, rsqrt_12, var_mean_12
# Graph fragment:
# %var_mean_12 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_12, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_32, 1e-05), kwargs = {})
# %rsqrt_12 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_24,), kwargs = {})
triton_per_fused__native_batch_norm_legit_18 = async_compile.triton('triton_per_fused__native_batch_norm_legit_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_18(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = (rindex // 16)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0) + (4096*r2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp1, 0)  # zero out masked-off channel rows before reducing; tmp1 is already [XBLOCK, RBLOCK]
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp16, xmask)
''', device_str='cuda')
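# Batch-norm stats once more, but with only 64 reduction elements per channel the
# generated variant batches several channels per program (runtime XBLOCK, 2D
# [XBLOCK, RBLOCK] tiles) and guards the channel tail with xmask, instead of the
# one-row-per-program layout used by kernels 10 and 14.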
# kernel path: runs/run_shard_4/inductor_cache/do/cdo367p5krf5643wl2lp7kzymjpyhh77xekuvgpc7j6phr2ztlpp.py
# Topologically Sorted Source Nodes: [batch_norm_12, x_16], Original ATen: [aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# batch_norm_12 => add_24, add_25, mul_24, mul_25, rsqrt_12, sub_12, var_mean_12
# x_16 => relu_12
# Graph fragment:
# %var_mean_12 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_12, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_32, 1e-05), kwargs = {})
# %rsqrt_12 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_24,), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_12, %getitem_33), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %rsqrt_12), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_24, %unsqueeze_49), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_25, %unsqueeze_51), kwargs = {})
# %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_25,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_19 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
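# Normalize + affine + ReLU for the (4, 256, 4, 4) stage; the 64.0 divisor
# matches the 64 reduction elements per channel in the stats kernel above.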
# kernel path: runs/run_shard_4/inductor_cache/fb/cfbv5gghhz4fn3m364vqzbyheyz4srxwsliwwlu2irtjrulis32r.py
# Topologically Sorted Source Nodes: [batch_norm_14, x_18], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# batch_norm_14 => add_28, add_29, mul_28, mul_29, rsqrt_14, sub_14, var_mean_14
# x_18 => relu_14
# Graph fragment:
# %var_mean_14 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_14, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_36, 1e-05), kwargs = {})
# %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_28,), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_14, %getitem_37), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %rsqrt_14), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_28, %unsqueeze_49), kwargs = {})
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_29, %unsqueeze_51), kwargs = {})
# %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_29,), kwargs = {})
# %le_12 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_14, 0), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_20 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x3), tmp15, None)
tl.store(out_ptr1 + (x3), tmp17, None)
''', device_str='cuda')
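# Reference sketch: the same normalize + ReLU, except this fusion also
# materializes the (out <= 0) mask that aten.threshold_backward consumes when
# autograd replays ReLU, saving a recomputation in the backward graph.
def _ref_bn_relu_with_mask(x, mean, sq_dev_sum, weight, bias, n=64.0, eps=1e-5):
    out = torch.relu((x - mean) * torch.rsqrt(sq_dev_sum / n + eps) * weight + bias)
    return out, out <= 0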
# kernel path: runs/run_shard_4/inductor_cache/4t/c4tcufm7mpt6aqlqptbhq52kln32y2khmkkn64cwjycy7vjmvqxk.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_19 => add_30, add_31, convert_element_type, convert_element_type_1, iota, mul_30, mul_31
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_30, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_30, torch.float32), kwargs = {})
# %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_31, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_31, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_21 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_21(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
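# Reference sketch: this tiny kernel just tabulates the source-index map for a
# 2x nearest-neighbor upsample -- output position i reads input position
# int(i * 0.5), i.e. [0, 0, 1, 1, 2, 2, 3, 3] for the 8-wide output here.
def _ref_nearest_src_index(out_len=8, scale=0.5):
    return (torch.arange(out_len, dtype=torch.float32) * scale).to(torch.int64)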
# kernel path: runs/run_shard_4/inductor_cache/5m/c5maacj3jtwj4eyy5dq4h3mtmlaeanbbzvyj3vfzn7jnmxm4boip.py
# Topologically Sorted Source Nodes: [x_19, x_20], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_19 => _unsafe_index
# x_20 => constant_pad_nd
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_14, [None, None, %unsqueeze_60, %convert_element_type_1]), kwargs = {})
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%_unsafe_index, [0, 1, 0, 1], 0.0), kwargs = {})
triton_poi_fused__unsafe_index_constant_pad_nd_22 = async_compile.triton('triton_poi_fused__unsafe_index_constant_pad_nd_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_constant_pad_nd_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 9) % 9
x0 = xindex % 9
x2 = (xindex // 81)
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x1), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([XBLOCK], 4, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + (x0), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + (4*tmp10) + (16*x2)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
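# Reference sketch: gathering rows and columns through the index map above is
# the nearest 2x upsample (4x4 -> 8x8); the fused constant_pad_nd then appends
# one zero row and column, producing the 9x9 maps the following convolution
# consumes. The negative-index fixup in the kernel (adding 4) can never fire
# for this non-negative map, so the plain gather below is equivalent.
def _ref_upsample_pad(x, idx):
    # x: (N, C, 4, 4); idx: the int64 map of length 8 from the kernel above
    up = x[:, :, idx][:, :, :, idx]  # (N, C, 8, 8)
    return torch.nn.functional.pad(up, (0, 1, 0, 1))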
# kernel path: runs/run_shard_4/inductor_cache/3c/c3c5htiq2ehulnymtrosrcqsnvilvfompwrnwfho7hu2l3fdn2kd.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_22 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_11, %convolution_15], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 256
x0 = xindex % 64
x2 = (xindex // 16384)
x3 = xindex
tmp0 = x1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (8192*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp9 = tl.load(in_ptr1 + (x0 + (64*((-128) + x1)) + (8192*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-128) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
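# Reference sketch: the skip connection. The 128-channel encoder activation
# (relu_11) is concatenated with the 128-channel decoder convolution output,
# whose bias gets folded in inside this same kernel rather than in a separate pass.
def _ref_skip_cat(skip, conv_out, conv_bias):
    # skip: (4, 128, 8, 8); conv_out: (4, 128, 8, 8); conv_bias: (128,)
    return torch.cat([skip, conv_out + conv_bias.view(1, -1, 1, 1)], dim=1)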
# kernel path: runs/run_shard_4/inductor_cache/ns/cnskur2c52mrxyiovhmdb572itchomuujsnufhu7y73ezbbzyxwg.py
# Topologically Sorted Source Nodes: [batch_norm_17, x_25], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# batch_norm_17 => add_38, add_39, mul_38, mul_39, rsqrt_17, sub_17, var_mean_17
# x_25 => relu_17
# Graph fragment:
# %var_mean_17 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_18, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_38 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_42, 1e-05), kwargs = {})
# %rsqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_38,), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_18, %getitem_43), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %rsqrt_17), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_38, %unsqueeze_62), kwargs = {})
# %add_39 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_39, %unsqueeze_64), kwargs = {})
# %relu_17 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_39,), kwargs = {})
# %le_9 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_17, 0), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_24 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_24(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x3), tmp15, None)
tl.store(out_ptr1 + (x3), tmp17, None)
''', device_str='cuda')
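# Same normalize + ReLU + backward-mask fusion as kernel 20, one decoder stage
# up: (4, 128, 8, 8) activations, 256 reduction elements per channel.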
# kernel path: runs/run_shard_4/inductor_cache/qv/cqv6ykbkkdiqpwlgc4bg74ynroetpj5lejvledpcgvghal55jkf3.py
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_26 => add_40, add_41, convert_element_type_4, convert_element_type_5, iota_2, mul_40, mul_41
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add_40 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_40, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_40, torch.float32), kwargs = {})
# %add_41 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_41, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_41, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_25 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_25(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
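# Source-index map for the next 2x nearest upsample, now 16 entries
# ([0, 0, 1, 1, ..., 7, 7]); same arithmetic as kernel 21.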
# kernel path: runs/run_shard_4/inductor_cache/pj/cpjjzxu5i7pi4ak5ygssr44dl5hvagmuhw3p36viajtikvpbslnk.py
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_26 => _unsafe_index_1
# x_27 => constant_pad_nd_1
# Graph fragment:
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_17, [None, None, %unsqueeze_73, %convert_element_type_5]), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%_unsafe_index_1, [0, 1, 0, 1], 0.0), kwargs = {})
triton_poi_fused__unsafe_index_constant_pad_nd_26 = async_compile.triton('triton_poi_fused__unsafe_index_constant_pad_nd_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_constant_pad_nd_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_26(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 17) % 17
x0 = xindex % 17
x2 = (xindex // 289)
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 16, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x1), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([XBLOCK], 8, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + (x0), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + (8*tmp10) + (64*x2)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
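# Second upsample + pad stage: 8x8 maps gathered up to 16x16, then zero-padded
# to 17x17; structure identical to kernel 22 with the bounds rescaled.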
# kernel path: runs/run_shard_4/inductor_cache/bf/cbfcvtsp5ewbmbyb3aphiqhcctihzgp7xhxvq3sdtddtb6uj736z.py
# Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_29 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_8, %convolution_19], 1), kwargs = {})
triton_poi_fused_cat_27 = async_compile.triton('triton_poi_fused_cat_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_27(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 128
x0 = xindex % 256
x2 = (xindex // 32768)
x3 = xindex
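    # Channels [0, 64) copy the first cat operand; channels [64, 128) read
    # the up-conv output and add its per-channel bias.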
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (16384*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 128, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (256*((-64) + x1)) + (16384*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-64) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
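
# Reference sketch (annotation, not emitted by Inductor): this kernel
# concatenates a 64-channel tensor (relu_8) with the 64-channel up-conv
# output (convolution_19) along dim=1, folding the conv bias into the
# second half. Eager equivalent with a hypothetical helper name:
def _ref_cat_bias(skip, upconv, bias):
    # skip, upconv: (N, 64, 16, 16); bias: (64,)
    return torch.cat([skip, upconv + bias.view(1, -1, 1, 1)], dim=1)
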
# kernel path: runs/run_shard_4/inductor_cache/vr/cvr6qcfrkxtau4st6ykjg2srz5gtjhrrvqsryhjmik6xjowojnp6.py
# Topologically Sorted Source Nodes: [batch_norm_20, x_32], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# batch_norm_20 => add_48, add_49, mul_48, mul_49, rsqrt_20, sub_20, var_mean_20
# x_32 => relu_20
# Graph fragment:
# %var_mean_20 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_22, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_48, 1e-05), kwargs = {})
# %rsqrt_20 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_48,), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_22, %getitem_49), kwargs = {})
# %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %rsqrt_20), kwargs = {})
# %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_48, %unsqueeze_75), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_49, %unsqueeze_77), kwargs = {})
# %relu_20 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_49,), kwargs = {})
# %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_20, 0), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_28 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_28(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
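    # Per-channel stats: in_ptr1 is the mean, in_ptr2 the sum of squared
    # deviations, normalised by 1024.0 = 4*16*16 elements per channel below.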
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x3), tmp15, None)
tl.store(out_ptr1 + (x3), tmp17, None)
''', device_str='cuda')
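
# Reference sketch (annotation, not emitted by Inductor): training-mode
# batch norm without running stats (var_mean over dims (0, 2, 3) with
# correction=0), affine transform, ReLU, plus the boolean ReLU-backward
# mask written to out_ptr1. Hypothetical eager helper:
def _ref_bn_relu_mask(x, weight, bias, eps=1e-05):
    var, mean = torch.var_mean(x, dim=(0, 2, 3), keepdim=True, correction=0)
    y = (x - mean) * torch.rsqrt(var + eps)
    y = y * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    out = torch.relu(y)
    return out, out <= 0  # mask consumed by threshold_backward
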
# kernel path: runs/run_shard_4/inductor_cache/eo/ceohzfsaskcupvt37voxy2gaa7jcdrmg4w2724zb25xkzabdcbms.py
# Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_33 => add_50, add_51, convert_element_type_8, convert_element_type_9, iota_4, mul_50, mul_51
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_4, 1), kwargs = {})
# %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_50, 0), kwargs = {})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_50, torch.float32), kwargs = {})
# %add_51 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {})
# %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_51, 0.5), kwargs = {})
# %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_51, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_29 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_29(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
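
# Annotation: this kernel materialises the gather indices for the next 2x
# nearest-neighbour upsample, i.e. the eager expression
#     (torch.arange(32) * 0.5).to(torch.int64)
# The same 32-entry map is reused for both the row and column lookups.
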
# kernel path: runs/run_shard_4/inductor_cache/qk/cqkd2lwnzhldswywxuzxeuiynsbclkc4nismtdmigjzaw4f2sfkp.py
# Topologically Sorted Source Nodes: [x_33, x_34], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_33 => _unsafe_index_2
# x_34 => constant_pad_nd_2
# Graph fragment:
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_20, [None, None, %unsqueeze_86, %convert_element_type_9]), kwargs = {})
# %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%_unsafe_index_2, [0, 1, 0, 1], 0.0), kwargs = {})
triton_poi_fused__unsafe_index_constant_pad_nd_30 = async_compile.triton('triton_poi_fused__unsafe_index_constant_pad_nd_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_constant_pad_nd_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_30(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 33) % 33
x0 = xindex % 33
x2 = (xindex // 1089)
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 32, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x1), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([XBLOCK], 16, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + (x0), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + (16*tmp10) + (256*x2)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
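
# Annotation: same fused upsample+pad pattern as
# triton_poi_fused__unsafe_index_constant_pad_nd_26, one decoder stage
# later: 16x16 -> 32x32, padded to 33x33, over 4*64 = 256 planes.
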
# kernel path: runs/run_shard_4/inductor_cache/sz/cszw3gpgw72pseinstaasv4upjsqap6k6e7vudpjzehclf6swm6x.py
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_36 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_5, %convolution_23], 1), kwargs = {})
triton_poi_fused_cat_31 = async_compile.triton('triton_poi_fused_cat_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_31(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 64
x0 = xindex % 1024
x2 = (xindex // 65536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (32768*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 64, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (1024*((-32) + x1)) + (32768*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-32) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
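
# Annotation: decoder concat as in triton_poi_fused_cat_27, here at 32x32
# with 32 channels from relu_5 and 32 biased up-conv channels -> 64.
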
# kernel path: runs/run_shard_4/inductor_cache/a2/ca2dfmaijs5chw7u7ucb4hnvkcecdj3d3i247qkymwiw6kqtvtz5.py
# Topologically Sorted Source Nodes: [batch_norm_23, x_39], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# batch_norm_23 => add_58, add_59, mul_58, mul_59, rsqrt_23, sub_23, var_mean_23
# x_39 => relu_23
# Graph fragment:
# %var_mean_23 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%convolution_26, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_58 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_54, 1e-05), kwargs = {})
# %rsqrt_23 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_58,), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_26, %getitem_55), kwargs = {})
# %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %rsqrt_23), kwargs = {})
# %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_58, %unsqueeze_88), kwargs = {})
# %add_59 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_59, %unsqueeze_90), kwargs = {})
# %relu_23 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_59,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_23, 0), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_32 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_32(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x3), tmp15, None)
tl.store(out_ptr1 + (x3), tmp17, None)
''', device_str='cuda')
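
# Annotation: same batch-norm/ReLU/mask fusion as kernel 28; the divisor
# 4096.0 is N*H*W = 4*32*32 for the 32-channel planes at this stage.
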
# kernel path: runs/run_shard_4/inductor_cache/wi/cwiczlgxwcgikfdob5rj3hbxy7jq3qgt74hx4ikp4oldudpj6g2j.py
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_40 => add_60, add_61, convert_element_type_12, convert_element_type_13, iota_6, mul_60, mul_61
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_60 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_6, 1), kwargs = {})
# %add_60 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_60, 0), kwargs = {})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_60, torch.float32), kwargs = {})
# %add_61 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.0), kwargs = {})
# %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_61, 0.5), kwargs = {})
# %convert_element_type_13 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_61, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_33 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_33', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_33(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
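
# Annotation: 64-entry index map floor(i * 0.5), analogous to
# triton_poi_fused__to_copy_add_arange_mul_29, for the next 2x upsample.
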
# kernel path: runs/run_shard_4/inductor_cache/3g/c3garej7jntewgdi5rzppp4n3dbeburc4c6h56gujysgt2xodeux.py
# Topologically Sorted Source Nodes: [x_40, x_41], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_40 => _unsafe_index_3
# x_41 => constant_pad_nd_3
# Graph fragment:
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_23, [None, None, %unsqueeze_99, %convert_element_type_13]), kwargs = {})
# %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%_unsafe_index_3, [0, 1, 0, 1], 0.0), kwargs = {})
triton_poi_fused__unsafe_index_constant_pad_nd_34 = async_compile.triton('triton_poi_fused__unsafe_index_constant_pad_nd_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_constant_pad_nd_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 540800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 65) % 65
x0 = xindex % 65
x2 = (xindex // 4225)
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 64, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x1), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([XBLOCK], 32, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + (x0), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + (32*tmp10) + (1024*x2)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
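
# Annotation: last upsample+pad of this pattern: 32x32 -> 64x64, padded to
# 65x65, over 4*32 = 128 planes.
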
# kernel path: runs/run_shard_4/inductor_cache/n2/cn23k7urrmolzptbeulandvxbnd3cswv6mk7jwvizikioena3t75.py
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_43 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_2, %convolution_27], 1), kwargs = {})
triton_poi_fused_cat_35 = async_compile.triton('triton_poi_fused_cat_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 32
x0 = xindex % 4096
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (65536*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 32, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-16) + x1)) + (65536*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-16) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
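
# Annotation: top-level decoder concat: 16 channels from relu_2 plus 16
# biased up-conv channels at 64x64 -> 32 channels.
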
# kernel path: runs/run_shard_4/inductor_cache/hl/chlanvf6deljvloy4dw3sabhgy2edb2wgppny7ntjxlkejy73qqx.py
# Topologically Sorted Source Nodes: [x10], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x10 => convolution_31
# Graph fragment:
# %convolution_31 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_26, %primals_82, %primals_83, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_36 = async_compile.triton('triton_poi_fused_convolution_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_36', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
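    # in_ptr0 holds the lone bias of the 1-channel head; it is broadcast
    # over all 4*1*64*64 = 16384 output elements below.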
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
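
# Reference sketch (annotation, not emitted by Inductor): the output head
# has a single channel, so its conv bias is one scalar added in place over
# every element of the (4, 1, 64, 64) result. Hypothetical eager helper:
def _ref_head_bias(x10, bias):
    # x10: (N, 1, H, W) conv output; bias: (1,)
    return x10.add_(bias.view(1, -1, 1, 1))
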
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83 = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, ), (1, ))
assert_size_stride(primals_13, (32, ), (1, ))
assert_size_stride(primals_14, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (32, ), (1, ))
assert_size_stride(primals_16, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_17, (32, ), (1, ))
assert_size_stride(primals_18, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_19, (64, ), (1, ))
assert_size_stride(primals_20, (64, ), (1, ))
assert_size_stride(primals_21, (64, ), (1, ))
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_23, (64, ), (1, ))
assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_25, (64, ), (1, ))
assert_size_stride(primals_26, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (128, ), (1, ))
assert_size_stride(primals_29, (128, ), (1, ))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128, ), (1, ))
assert_size_stride(primals_32, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_33, (128, ), (1, ))
assert_size_stride(primals_34, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (256, ), (1, ))
assert_size_stride(primals_36, (256, ), (1, ))
assert_size_stride(primals_37, (256, ), (1, ))
assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (256, ), (1, ))
assert_size_stride(primals_40, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_41, (256, ), (1, ))
assert_size_stride(primals_42, (128, 256, 2, 2), (1024, 4, 2, 1))
assert_size_stride(primals_43, (128, ), (1, ))
assert_size_stride(primals_44, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_45, (128, ), (1, ))
assert_size_stride(primals_46, (128, ), (1, ))
assert_size_stride(primals_47, (128, ), (1, ))
assert_size_stride(primals_48, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_49, (128, ), (1, ))
assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_51, (128, ), (1, ))
assert_size_stride(primals_52, (64, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_53, (64, ), (1, ))
assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_55, (64, ), (1, ))
assert_size_stride(primals_56, (64, ), (1, ))
assert_size_stride(primals_57, (64, ), (1, ))
assert_size_stride(primals_58, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_59, (64, ), (1, ))
assert_size_stride(primals_60, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_61, (64, ), (1, ))
assert_size_stride(primals_62, (32, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_63, (32, ), (1, ))
assert_size_stride(primals_64, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_65, (32, ), (1, ))
assert_size_stride(primals_66, (32, ), (1, ))
assert_size_stride(primals_67, (32, ), (1, ))
assert_size_stride(primals_68, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_69, (32, ), (1, ))
assert_size_stride(primals_70, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_71, (32, ), (1, ))
assert_size_stride(primals_72, (16, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_73, (16, ), (1, ))
assert_size_stride(primals_74, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_75, (16, ), (1, ))
assert_size_stride(primals_76, (16, ), (1, ))
assert_size_stride(primals_77, (16, ), (1, ))
assert_size_stride(primals_78, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_79, (16, ), (1, ))
assert_size_stride(primals_80, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_81, (16, ), (1, ))
assert_size_stride(primals_82, (1, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_83, (1, ), (1, ))
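    # Annotation: the asserted shapes are consistent with a U-Net-style
    # encoder/decoder: 3x3 conv weights widening 1->16->32->64->128->256,
    # 2x2 up-convolution weights (primals_42/52/62/72) narrowing back,
    # per-stage batch-norm affine pairs, and a final 16->1 3x3 head
    # (primals_82, primals_83).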
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_1.run(buf1, buf2, buf3, buf4, 32, 8192, grid=grid(32), stream=stream0)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_2.run(buf2, buf3, buf4, buf5, buf6, buf8, 16, 2, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm, x], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_3.run(buf1, buf5, buf6, primals_4, primals_5, buf9, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf11, primals_7, 262144, grid=grid(262144), stream=stream0)
del primals_7
buf12 = buf4; del buf4 # reuse
buf13 = buf3; del buf3 # reuse
buf14 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [batch_norm_1], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_1.run(buf11, buf12, buf13, buf14, 32, 8192, grid=grid(32), stream=stream0)
buf15 = buf6; del buf6 # reuse
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf18 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_1], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_2.run(buf12, buf13, buf14, buf15, buf16, buf18, 16, 2, grid=grid(16), stream=stream0)
buf19 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_1, x_1], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_3.run(buf11, buf15, buf16, primals_4, primals_5, buf19, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf21, primals_9, 262144, grid=grid(262144), stream=stream0)
del primals_9
buf22 = buf14; del buf14 # reuse
buf23 = buf13; del buf13 # reuse
buf24 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [batch_norm_2], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_1.run(buf21, buf22, buf23, buf24, 32, 8192, grid=grid(32), stream=stream0)
buf25 = buf16; del buf16 # reuse
buf26 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf28 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_2], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_2.run(buf22, buf23, buf24, buf25, buf26, buf28, 16, 2, grid=grid(16), stream=stream0)
buf29 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_2, x_2], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_3.run(buf21, buf25, buf26, primals_4, primals_5, buf29, 262144, grid=grid(262144), stream=stream0)
del primals_5
buf30 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32)
buf31 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_4.run(buf29, buf30, buf31, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf30, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf33, primals_11, 131072, grid=grid(131072), stream=stream0)
del primals_11
buf34 = reinterpret_tensor(buf24, (1, 32, 1, 1), (32, 1, 32, 32), 0); del buf24 # reuse
buf35 = reinterpret_tensor(buf23, (1, 32, 1, 1), (32, 1, 32, 32), 0); del buf23 # reuse
buf37 = reinterpret_tensor(buf22, (1, 32, 1, 1), (32, 1, 32, 32), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [batch_norm_3], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_6.run(buf33, buf34, buf35, buf37, 32, 4096, grid=grid(32), stream=stream0)
buf38 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_3, x_4], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_7.run(buf33, buf34, buf35, primals_12, primals_13, buf38, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf38, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf40 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf40, primals_15, 131072, grid=grid(131072), stream=stream0)
del primals_15
buf41 = buf35; del buf35 # reuse
buf42 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf44 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_4], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_6.run(buf40, buf41, buf42, buf44, 32, 4096, grid=grid(32), stream=stream0)
buf45 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_4, x_5], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_7.run(buf40, buf41, buf42, primals_12, primals_13, buf45, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf47 = buf46; del buf46 # reuse
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf47, primals_17, 131072, grid=grid(131072), stream=stream0)
del primals_17
buf48 = buf42; del buf42 # reuse
buf49 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf51 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_5], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_6.run(buf47, buf48, buf49, buf51, 32, 4096, grid=grid(32), stream=stream0)
buf52 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_5, x_6], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_7.run(buf47, buf48, buf49, primals_12, primals_13, buf52, 131072, grid=grid(131072), stream=stream0)
del primals_13
buf53 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32)
buf54 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf52, buf53, buf54, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf53, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 64, 16, 16), (16384, 256, 16, 1))
buf56 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf56, primals_19, 65536, grid=grid(65536), stream=stream0)
del primals_19
buf57 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf58 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf60 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_6], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_10.run(buf56, buf57, buf58, buf60, 64, 1024, grid=grid(64), stream=stream0)
buf61 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_6, x_8], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_11.run(buf56, buf57, buf58, primals_20, primals_21, buf61, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf61, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 64, 16, 16), (16384, 256, 16, 1))
buf63 = buf62; del buf62 # reuse
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf63, primals_23, 65536, grid=grid(65536), stream=stream0)
del primals_23
buf64 = buf58; del buf58 # reuse
buf65 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf67 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_7], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_10.run(buf63, buf64, buf65, buf67, 64, 1024, grid=grid(64), stream=stream0)
buf68 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_7, x_9], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_11.run(buf63, buf64, buf65, primals_20, primals_21, buf68, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf69 = extern_kernels.convolution(buf68, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf69, (4, 64, 16, 16), (16384, 256, 16, 1))
buf70 = buf69; del buf69 # reuse
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf70, primals_25, 65536, grid=grid(65536), stream=stream0)
del primals_25
buf71 = buf65; del buf65 # reuse
buf72 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf74 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_8], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_10.run(buf70, buf71, buf72, buf74, 64, 1024, grid=grid(64), stream=stream0)
buf75 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_8, x_10], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_11.run(buf70, buf71, buf72, primals_20, primals_21, buf75, 65536, grid=grid(65536), stream=stream0)
del primals_21
buf76 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.float32)
buf77 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf75, buf76, buf77, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf78 = extern_kernels.convolution(buf76, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78; del buf78 # reuse
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf79, primals_27, 32768, grid=grid(32768), stream=stream0)
del primals_27
buf80 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf81 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf83 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_9], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_14.run(buf79, buf80, buf81, buf83, 128, 256, grid=grid(128), stream=stream0)
buf84 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_9, x_12], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_15.run(buf79, buf80, buf81, primals_28, primals_29, buf84, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf85 = extern_kernels.convolution(buf84, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf85, (4, 128, 8, 8), (8192, 64, 8, 1))
buf86 = buf85; del buf85 # reuse
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf86, primals_31, 32768, grid=grid(32768), stream=stream0)
del primals_31
buf87 = buf81; del buf81 # reuse
buf88 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf90 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_10], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_14.run(buf86, buf87, buf88, buf90, 128, 256, grid=grid(128), stream=stream0)
buf91 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_10, x_13], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_15.run(buf86, buf87, buf88, primals_28, primals_29, buf91, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf92 = extern_kernels.convolution(buf91, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 128, 8, 8), (8192, 64, 8, 1))
buf93 = buf92; del buf92 # reuse
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf93, primals_33, 32768, grid=grid(32768), stream=stream0)
del primals_33
buf94 = buf88; del buf88 # reuse
buf95 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf97 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_11], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_14.run(buf93, buf94, buf95, buf97, 128, 256, grid=grid(128), stream=stream0)
buf98 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_11, x_14], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_15.run(buf93, buf94, buf95, primals_28, primals_29, buf98, 32768, grid=grid(32768), stream=stream0)
del primals_29
buf99 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
buf100 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_16.run(buf98, buf99, buf100, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf101 = extern_kernels.convolution(buf99, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 256, 4, 4), (4096, 16, 4, 1))
buf102 = buf101; del buf101 # reuse
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
triton_poi_fused_convolution_17.run(buf102, primals_35, 16384, grid=grid(16384), stream=stream0)
del primals_35
buf103 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf104 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf106 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_12], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_18.run(buf102, buf103, buf104, buf106, 256, 64, grid=grid(256), stream=stream0)
buf107 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_12, x_16], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_19.run(buf102, buf103, buf104, primals_36, primals_37, buf107, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf108 = extern_kernels.convolution(buf107, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf108, (4, 256, 4, 4), (4096, 16, 4, 1))
buf109 = buf108; del buf108 # reuse
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
triton_poi_fused_convolution_17.run(buf109, primals_39, 16384, grid=grid(16384), stream=stream0)
del primals_39
buf110 = buf104; del buf104 # reuse
buf111 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf113 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_13], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_18.run(buf109, buf110, buf111, buf113, 256, 64, grid=grid(256), stream=stream0)
buf114 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_13, x_17], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_19.run(buf109, buf110, buf111, primals_36, primals_37, buf114, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 256, 4, 4), (4096, 16, 4, 1))
buf116 = buf115; del buf115 # reuse
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
triton_poi_fused_convolution_17.run(buf116, primals_41, 16384, grid=grid(16384), stream=stream0)
del primals_41
buf117 = buf111; del buf111 # reuse
buf118 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf120 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_14], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_18.run(buf116, buf117, buf118, buf120, 256, 64, grid=grid(256), stream=stream0)
buf121 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
buf236 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [batch_norm_14, x_18], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_20.run(buf116, buf117, buf118, primals_36, primals_37, buf121, buf236, 16384, grid=grid(16384), stream=stream0)
del buf118
del primals_37
buf122 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_21.run(buf122, 8, grid=grid(8), stream=stream0)
buf123 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_19, x_20], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
triton_poi_fused__unsafe_index_constant_pad_nd_22.run(buf122, buf121, buf123, 82944, grid=grid(82944), stream=stream0)
del buf121
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution]
buf124 = extern_kernels.convolution(buf123, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.cat]
triton_poi_fused_cat_23.run(buf98, buf124, primals_43, buf125, 65536, grid=grid(65536), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf126 = extern_kernels.convolution(buf125, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126; del buf126 # reuse
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf127, primals_45, 32768, grid=grid(32768), stream=stream0)
del primals_45
buf128 = buf95; del buf95 # reuse
buf129 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf131 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_15], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_14.run(buf127, buf128, buf129, buf131, 128, 256, grid=grid(128), stream=stream0)
buf132 = buf124; del buf124 # reuse
# Topologically Sorted Source Nodes: [batch_norm_15, x_23], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_15.run(buf127, buf128, buf129, primals_46, primals_47, buf132, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf133 = extern_kernels.convolution(buf132, primals_48, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf133, (4, 128, 8, 8), (8192, 64, 8, 1))
buf134 = buf133; del buf133 # reuse
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf134, primals_49, 32768, grid=grid(32768), stream=stream0)
del primals_49
buf135 = buf129; del buf129 # reuse
buf136 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf138 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_16], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_14.run(buf134, buf135, buf136, buf138, 128, 256, grid=grid(128), stream=stream0)
buf139 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_16, x_24], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_15.run(buf134, buf135, buf136, primals_46, primals_47, buf139, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf140 = extern_kernels.convolution(buf139, primals_50, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140; del buf140 # reuse
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf141, primals_51, 32768, grid=grid(32768), stream=stream0)
del primals_51
buf142 = buf136; del buf136 # reuse
buf143 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf145 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_17], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_14.run(buf141, buf142, buf143, buf145, 128, 256, grid=grid(128), stream=stream0)
buf146 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
buf235 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [batch_norm_17, x_25], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_24.run(buf141, buf142, buf143, primals_46, primals_47, buf146, buf235, 32768, grid=grid(32768), stream=stream0)
del buf143
del primals_47
buf147 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_25.run(buf147, 16, grid=grid(16), stream=stream0)
buf148 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
triton_poi_fused__unsafe_index_constant_pad_nd_26.run(buf147, buf146, buf148, 147968, grid=grid(147968), stream=stream0)
del buf146
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
buf149 = extern_kernels.convolution(buf148, primals_52, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 64, 16, 16), (16384, 256, 16, 1))
buf150 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.cat]
triton_poi_fused_cat_27.run(buf75, buf149, primals_53, buf150, 131072, grid=grid(131072), stream=stream0)
del primals_53
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf151 = extern_kernels.convolution(buf150, primals_54, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 64, 16, 16), (16384, 256, 16, 1))
buf152 = buf151; del buf151 # reuse
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf152, primals_55, 65536, grid=grid(65536), stream=stream0)
del primals_55
buf153 = buf72; del buf72 # reuse
buf154 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf156 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_18], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_10.run(buf152, buf153, buf154, buf156, 64, 1024, grid=grid(64), stream=stream0)
buf157 = buf149; del buf149 # reuse
# Topologically Sorted Source Nodes: [batch_norm_18, x_30], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_11.run(buf152, buf153, buf154, primals_56, primals_57, buf157, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf158 = extern_kernels.convolution(buf157, primals_58, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf158, (4, 64, 16, 16), (16384, 256, 16, 1))
buf159 = buf158; del buf158 # reuse
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf159, primals_59, 65536, grid=grid(65536), stream=stream0)
del primals_59
buf160 = buf154; del buf154 # reuse
buf161 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf163 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_19], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_10.run(buf159, buf160, buf161, buf163, 64, 1024, grid=grid(64), stream=stream0)
buf164 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_19, x_31], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_11.run(buf159, buf160, buf161, primals_56, primals_57, buf164, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
buf165 = extern_kernels.convolution(buf164, primals_60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 64, 16, 16), (16384, 256, 16, 1))
buf166 = buf165; del buf165 # reuse
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf166, primals_61, 65536, grid=grid(65536), stream=stream0)
del primals_61
buf167 = buf161; del buf161 # reuse
buf168 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf170 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_20], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_10.run(buf166, buf167, buf168, buf170, 64, 1024, grid=grid(64), stream=stream0)
buf171 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
buf234 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [batch_norm_20, x_32], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_28.run(buf166, buf167, buf168, primals_56, primals_57, buf171, buf234, 65536, grid=grid(65536), stream=stream0)
del buf168
del primals_57
buf172 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_29.run(buf172, 32, grid=grid(32), stream=stream0)
buf173 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_33, x_34], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
triton_poi_fused__unsafe_index_constant_pad_nd_30.run(buf172, buf171, buf173, 278784, grid=grid(278784), stream=stream0)
del buf171
# Topologically Sorted Source Nodes: [x_35], Original ATen: [aten.convolution]
buf174 = extern_kernels.convolution(buf173, primals_62, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf174, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf175 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.cat]
triton_poi_fused_cat_31.run(buf52, buf174, primals_63, buf175, 262144, grid=grid(262144), stream=stream0)
del primals_63
# Topologically Sorted Source Nodes: [conv2d_24], Original ATen: [aten.convolution]
buf176 = extern_kernels.convolution(buf175, primals_64, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf176, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf177 = buf176; del buf176 # reuse
# Topologically Sorted Source Nodes: [conv2d_24], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf177, primals_65, 131072, grid=grid(131072), stream=stream0)
del primals_65
buf178 = buf49; del buf49 # reuse
buf179 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf181 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_21], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_6.run(buf177, buf178, buf179, buf181, 32, 4096, grid=grid(32), stream=stream0)
buf182 = buf174; del buf174 # reuse
# Topologically Sorted Source Nodes: [batch_norm_21, x_37], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_7.run(buf177, buf178, buf179, primals_66, primals_67, buf182, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_25], Original ATen: [aten.convolution]
buf183 = extern_kernels.convolution(buf182, primals_68, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf184 = buf183; del buf183 # reuse
# Topologically Sorted Source Nodes: [conv2d_25], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf184, primals_69, 131072, grid=grid(131072), stream=stream0)
del primals_69
buf185 = buf179; del buf179 # reuse
buf186 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf188 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_22], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_6.run(buf184, buf185, buf186, buf188, 32, 4096, grid=grid(32), stream=stream0)
buf189 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_22, x_38], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_7.run(buf184, buf185, buf186, primals_66, primals_67, buf189, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_26], Original ATen: [aten.convolution]
buf190 = extern_kernels.convolution(buf189, primals_70, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf190, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf191 = buf190; del buf190 # reuse
# Topologically Sorted Source Nodes: [conv2d_26], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf191, primals_71, 131072, grid=grid(131072), stream=stream0)
del primals_71
buf192 = buf186; del buf186 # reuse
buf193 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf195 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_23], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_6.run(buf191, buf192, buf193, buf195, 32, 4096, grid=grid(32), stream=stream0)
buf196 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf233 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [batch_norm_23, x_39], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_32.run(buf191, buf192, buf193, primals_66, primals_67, buf196, buf233, 131072, grid=grid(131072), stream=stream0)
del primals_67
buf197 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_33.run(buf197, 64, grid=grid(64), stream=stream0)
buf198 = empty_strided_cuda((4, 32, 65, 65), (135200, 4225, 65, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_40, x_41], Original ATen: [aten._unsafe_index, aten.constant_pad_nd]
triton_poi_fused__unsafe_index_constant_pad_nd_34.run(buf197, buf196, buf198, 540800, grid=grid(540800), stream=stream0)
del buf196
# Topologically Sorted Source Nodes: [x_42], Original ATen: [aten.convolution]
buf199 = extern_kernels.convolution(buf198, primals_72, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf199, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf200 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat]
triton_poi_fused_cat_35.run(buf29, buf199, primals_73, buf200, 524288, grid=grid(524288), stream=stream0)
del primals_73
# Topologically Sorted Source Nodes: [conv2d_28], Original ATen: [aten.convolution]
buf201 = extern_kernels.convolution(buf200, primals_74, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf201, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf202 = buf201; del buf201 # reuse
# Topologically Sorted Source Nodes: [conv2d_28], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf202, primals_75, 262144, grid=grid(262144), stream=stream0)
del primals_75
buf203 = reinterpret_tensor(buf193, (1, 16, 1, 1, 2), (32, 1, 32, 32, 16), 0); del buf193 # reuse
buf204 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16), torch.float32)
buf205 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_24], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_1.run(buf202, buf203, buf204, buf205, 32, 8192, grid=grid(32), stream=stream0)
buf206 = buf26; del buf26 # reuse
buf207 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf209 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_24], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_2.run(buf203, buf204, buf205, buf206, buf207, buf209, 16, 2, grid=grid(16), stream=stream0)
buf210 = buf199; del buf199 # reuse
# Topologically Sorted Source Nodes: [batch_norm_24, x_44], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_3.run(buf202, buf206, buf207, primals_76, primals_77, buf210, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_29], Original ATen: [aten.convolution]
buf211 = extern_kernels.convolution(buf210, primals_78, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf211, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf212 = buf211; del buf211 # reuse
# Topologically Sorted Source Nodes: [conv2d_29], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf212, primals_79, 262144, grid=grid(262144), stream=stream0)
del primals_79
buf213 = buf205; del buf205 # reuse
buf214 = buf204; del buf204 # reuse
buf215 = buf203; del buf203 # reuse
# Topologically Sorted Source Nodes: [batch_norm_25], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_1.run(buf212, buf213, buf214, buf215, 32, 8192, grid=grid(32), stream=stream0)
buf216 = buf207; del buf207 # reuse
buf217 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf219 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_25], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_2.run(buf213, buf214, buf215, buf216, buf217, buf219, 16, 2, grid=grid(16), stream=stream0)
buf220 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_25, x_45], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_3.run(buf212, buf216, buf217, primals_76, primals_77, buf220, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_30], Original ATen: [aten.convolution]
buf221 = extern_kernels.convolution(buf220, primals_80, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf221, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf222 = buf221; del buf221 # reuse
# Topologically Sorted Source Nodes: [conv2d_30], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf222, primals_81, 262144, grid=grid(262144), stream=stream0)
del primals_81
buf223 = buf215; del buf215 # reuse
buf224 = buf214; del buf214 # reuse
buf225 = buf213; del buf213 # reuse
# Topologically Sorted Source Nodes: [batch_norm_26], Original ATen: [aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_1.run(buf222, buf223, buf224, buf225, 32, 8192, grid=grid(32), stream=stream0)
buf226 = buf217; del buf217 # reuse
buf227 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf229 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_26], Original ATen: [aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_2.run(buf223, buf224, buf225, buf226, buf227, buf229, 16, 2, grid=grid(16), stream=stream0)
del buf223
del buf224
del buf225
buf230 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch_norm_26, x_46], Original ATen: [aten._native_batch_norm_legit, aten.relu]
triton_poi_fused__native_batch_norm_legit_relu_3.run(buf222, buf226, buf227, primals_76, primals_77, buf230, 262144, grid=grid(262144), stream=stream0)
del buf227
del primals_77
# Topologically Sorted Source Nodes: [x10], Original ATen: [aten.convolution]
buf231 = extern_kernels.convolution(buf230, primals_82, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf231, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf232 = buf231; del buf231 # reuse
# Topologically Sorted Source Nodes: [x10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_36.run(buf232, primals_83, 16384, grid=grid(16384), stream=stream0)
del primals_83
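    # buf232 is the network output (4, 1, 64, 64); everything else returned here
    # is weights plus saved activations/statistics needed for the backward pass.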
return (buf232, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, primals_54, primals_56, primals_58, primals_60, primals_62, primals_64, primals_66, primals_68, primals_70, primals_72, primals_74, primals_76, primals_78, primals_80, primals_82, buf1, reinterpret_tensor(buf8, (16, ), (1, ), 0), buf9, buf11, reinterpret_tensor(buf18, (16, ), (1, ), 0), buf19, buf21, reinterpret_tensor(buf28, (16, ), (1, ), 0), buf29, buf30, buf31, buf33, reinterpret_tensor(buf37, (32, ), (1, ), 0), buf38, buf40, reinterpret_tensor(buf44, (32, ), (1, ), 0), buf45, buf47, reinterpret_tensor(buf51, (32, ), (1, ), 0), buf52, buf53, buf54, buf56, reinterpret_tensor(buf60, (64, ), (1, ), 0), buf61, buf63, reinterpret_tensor(buf67, (64, ), (1, ), 0), buf68, buf70, reinterpret_tensor(buf74, (64, ), (1, ), 0), buf75, buf76, buf77, buf79, reinterpret_tensor(buf83, (128, ), (1, ), 0), buf84, buf86, reinterpret_tensor(buf90, (128, ), (1, ), 0), buf91, buf93, reinterpret_tensor(buf97, (128, ), (1, ), 0), buf98, buf99, buf100, buf102, reinterpret_tensor(buf106, (256, ), (1, ), 0), buf107, buf109, reinterpret_tensor(buf113, (256, ), (1, ), 0), buf114, buf116, reinterpret_tensor(buf120, (256, ), (1, ), 0), buf122, buf123, buf125, buf127, reinterpret_tensor(buf131, (128, ), (1, ), 0), buf132, buf134, reinterpret_tensor(buf138, (128, ), (1, ), 0), buf139, buf141, reinterpret_tensor(buf145, (128, ), (1, ), 0), buf147, buf148, buf150, buf152, reinterpret_tensor(buf156, (64, ), (1, ), 0), buf157, buf159, reinterpret_tensor(buf163, (64, ), (1, ), 0), buf164, buf166, reinterpret_tensor(buf170, (64, ), (1, ), 0), buf172, buf173, buf175, buf177, reinterpret_tensor(buf181, (32, ), (1, ), 0), buf182, buf184, reinterpret_tensor(buf188, (32, ), (1, ), 0), buf189, buf191, reinterpret_tensor(buf195, (32, ), (1, ), 0), buf197, buf198, buf200, buf202, reinterpret_tensor(buf209, (16, ), (1, ), 0), buf210, buf212, reinterpret_tensor(buf219, (16, ), (1, ), 0), buf220, buf222, reinterpret_tensor(buf229, (16, ), (1, ), 0), buf230, reinterpret_tensor(buf226, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf216, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf206, (1, 16, 1, 1), (16, 1, 1, 1), 0), buf233, reinterpret_tensor(buf192, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf185, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf178, (1, 32, 1, 1), (32, 1, 1, 1), 0), buf234, reinterpret_tensor(buf167, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf160, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf153, (1, 64, 1, 1), (64, 1, 1, 1), 0), buf235, reinterpret_tensor(buf142, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf135, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf128, (1, 128, 1, 1), (128, 1, 1, 1), 0), buf236, reinterpret_tensor(buf117, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf110, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf103, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf94, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf87, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf80, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf71, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf64, (1, 64, 1, 1), (64, 1, 1, 1), 0), 
reinterpret_tensor(buf57, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf48, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf41, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf34, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf25, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf15, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf5, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((128, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((64, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((32, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((16, 32, 2, 2), (128, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((1, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
# Eager-mode PyTorch source for the compiled module above.
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.cuda import *
def conv3x3(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=True)
def maxpool2x2():
return nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
def concat(xh, xv):
return torch.cat([xh, xv], dim=1)
class UpConv2x2(nn.Module):
def __init__(self, channels):
super(UpConv2x2, self).__init__()
self.conv = nn.Conv2d(channels, channels // 2, kernel_size=2,
stride=1, padding=0, bias=True)
def forward(self, x):
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = F.pad(x, (0, 1, 0, 1))
x = self.conv(x)
return x
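# Illustrative shape check (our addition, not part of the original model):
# UpConv2x2 doubles the spatial size and halves the channels via nearest
# upsample -> (0, 1, 0, 1) pad -> 2x2 conv, matching e.g. the
# (4, 256, 4, 4) -> (4, 128, 8, 8) buffers in the compiled graph above.
# The helper name is ours; call it manually.
def _upconv2x2_shape_demo():
    up = UpConv2x2(256)
    x = torch.rand(4, 256, 4, 4)
    y = up(x)
    assert y.shape == (4, 128, 8, 8)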
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super(ConvBlock, self).__init__()
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
        self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)  # one norm instance shared by all three convs (likewise in the Down/Up blocks below)
def forward(self, x):
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
class DownConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super(DownConvBlock, self).__init__()
self.maxpool = maxpool2x2()
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, x):
x = self.maxpool(x)
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
class UpConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super(UpConvBlock, self).__init__()
self.upconv = UpConv2x2(in_channels)
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, xh, xv):
"""
Args:
            xh: Tensor, activations from the same-resolution feature maps (gray arrow in diagram)
            xv: Tensor, activations from the lower-resolution feature maps (green arrow in diagram)
"""
xv = self.upconv(xv)
x = concat(xh, xv)
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
class UNet(nn.Module):
def __init__(self):
super(UNet, self).__init__()
fs = [16, 32, 64, 128, 256]
self.conv_in = ConvBlock(1, fs[0])
self.dconv1 = DownConvBlock(fs[0], fs[1])
self.dconv2 = DownConvBlock(fs[1], fs[2])
self.dconv3 = DownConvBlock(fs[2], fs[3])
self.dconv4 = DownConvBlock(fs[3], fs[4])
self.uconv1 = UpConvBlock(fs[4], fs[3])
self.uconv2 = UpConvBlock(fs[3], fs[2])
self.uconv3 = UpConvBlock(fs[2], fs[1])
self.uconv4 = UpConvBlock(fs[1], fs[0])
self.conv_out = conv3x3(fs[0], 1)
self._initialize_weights()
def forward(self, x):
x1 = self.conv_in(x)
x2 = self.dconv1(x1)
x3 = self.dconv2(x2)
x4 = self.dconv3(x3)
x5 = self.dconv4(x4)
x6 = self.uconv1(x4, x5)
x7 = self.uconv2(x3, x6)
x8 = self.uconv3(x2, x7)
x9 = self.uconv4(x1, x8)
x10 = self.conv_out(x9)
return x10
def _initialize_weights(self):
conv_modules = [m for m in self.modules() if isinstance(m, nn.Conv2d)]
for m in conv_modules:
n = m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3]
m.weight.data.normal_(0, np.sqrt(2.0 / n))
m.bias.data.zero_()
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
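# Hedged end-to-end smoke test (our addition, not from the original source):
# runs the eager model on the same (4, 1, 64, 64) input that get_inputs()
# produces and checks the output shape against the compiled module's buf232.
def _unet_forward_demo():
    net = UNet()
    x, = get_inputs()
    y = net(x)
    assert y.shape == (4, 1, 64, 64)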
# Standalone Triton module: the kernel definitions referenced by call() above.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.cuda import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # all-true mask, unused: the grid covers xnumel exactly
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
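# triton_poi_fused_convolution_0 is an in-place bias add run after the external
# convolution: for the (4, 16, 64, 64) output, xindex // 4096 % 16 recovers the
# channel (4096 = 64 * 64 spatial positions), and the launch covers xnumel
# exactly, so no bounds mask is required.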
@triton.jit
def triton_red_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 32
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = xindex // 16
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (4096 * x0 + 65536 * (r2 // 4096) + 131072 *
x1 + r2 % 4096), rmask & xmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr1 + x3, tmp3, xmask)
tl.store(out_ptr2 + x3, tmp4, xmask)
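# Stage 1 of a split Welford reduction for batch norm: each of 32 programs
# accumulates mean / M2 / count for one of the 16 channels over half the batch
# (2 samples x 64 x 64 = 8192 elements); the per-channel partials are merged
# by the stage-2 kernel below.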
@triton.jit
def triton_per_fused__native_batch_norm_legit_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * r1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 16 * r1), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
    tmp12[:, None]  # combined Welford weight (count), unused
tmp16 = 16384.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
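# Stage 2: merges the two Welford partials per channel and writes the mean,
# the M2 sum, and rsqrt(M2 / 16384 + 1e-05) -- the inverse standard deviation
# over all 4 x 64 x 64 values of that channel.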
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16384.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
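# Fused normalize + affine + ReLU: recomputes inv_std = rsqrt(M2 / 16384 +
# 1e-05) from the stored M2 sum (in_ptr2), applies weight (in_ptr3) and bias
# (in_ptr4), then clamps at zero.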
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
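# 2x2 / stride-2 max pool: out_ptr0 gets the window max, and out_ptr1 stores
# an int8 code (0..3) for which window position won, which max_pool2d's
# backward uses to route gradients.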
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_6(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex % 1024
r2 = rindex // 1024
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0 + 32768 * r2), rmask &
xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # Welford weight (count), unused
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_7(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_10(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex % 256
r2 = rindex // 256
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0 + 16384 * r2), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x0, tmp18, None)
tl.store(out_ptr0 + x0, tmp8, None)
tl.store(out_ptr1 + x0, tmp13, None)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_11(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_14(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex % 64
r2 = rindex // 64
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0 + 8192 * r2), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x0, tmp18, None)
tl.store(out_ptr0 + x0, tmp8, None)
tl.store(out_ptr1 + x0, tmp13, None)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_15(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
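# Conv bias epilogue at the bottleneck: in-place per-channel bias add on a
# (N, 256, 4, 4) activation (16 = 4*4 spatial positions per channel).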
@triton.jit
def triton_poi_fused_convolution_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
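# Per-channel batch-norm statistics for a (4, 256, 4, 4) activation: the
# 64 = 4*16 elements per channel reduce to mean, sum of squared deviations,
# and rsqrt(var + 1e-05).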
@triton.jit
def triton_per_fused__native_batch_norm_legit_18(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 4096 * r2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
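# Fused batch-norm + ReLU at the 4x4 scale; identical to kernel 15 above
# except the variance divisor is 64 elements per channel.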
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_19(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
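# Same batch-norm + ReLU as above, but additionally stores the `output <= 0`
# boolean mask (out_ptr1) that the ReLU backward (threshold_backward) consumes.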
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_20(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + x3, tmp15, None)
tl.store(out_ptr1 + x3, tmp17, None)
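# Builds the 8-entry nearest-neighbor source-index table floor(i * 0.5) used
# to upsample a 4x4 feature map to 8x8.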
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_21(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
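# Gathers the nearest-neighbor upsampled values through the index table
# (in_ptr0) and zero-pads one row and column on the bottom/right, turning the
# 8x8 upsample into the 9x9 input expected by the 2x2 up-convolution: the
# fused form of F.interpolate(..., mode='nearest') followed by
# F.pad(x, (0, 1, 0, 1)).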
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_22(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 9 % 9
x0 = xindex % 9
x2 = xindex // 81
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tl.full([XBLOCK], 4, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + x0, tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + 4 * tmp10 + 16 * x2), tmp5 & xmask,
eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + x4, tmp15, xmask)
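# Channel concatenation for the skip connection: the first 128 output
# channels copy the encoder activation (in_ptr0); the remaining 128 take the
# up-convolution output (in_ptr1) with its bias (in_ptr2) folded in.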
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 256
x0 = xindex % 64
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 8192 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-128 + x1) + 8192 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-128 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
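# Kernels 24 through 35 repeat the decoder pattern above at each higher
# resolution (16x16, 32x32, and 64x64): fused batch-norm + ReLU with the
# backward mask, the nearest-neighbor index table, the upsample-and-pad
# gather, and the skip-connection concatenation.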
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_24(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + x3, tmp15, None)
tl.store(out_ptr1 + x3, tmp17, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_25(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_26(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 17 % 17
x0 = xindex % 17
x2 = xindex // 289
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 16, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tl.full([XBLOCK], 8, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + x0, tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + 8 * tmp10 + 64 * x2), tmp5 & xmask,
eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + x4, tmp15, xmask)
@triton.jit
def triton_poi_fused_cat_27(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 128
x0 = xindex % 256
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 256 * (-64 + x1) + 16384 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-64 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_28(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + x3, tmp15, None)
tl.store(out_ptr1 + x3, tmp17, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_29(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_30(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 33 % 33
x0 = xindex % 33
x2 = xindex // 1089
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 32, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tl.full([XBLOCK], 16, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + x0, tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + 16 * tmp10 + 256 * x2), tmp5 & xmask,
eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + x4, tmp15, xmask)
@triton.jit
def triton_poi_fused_cat_31(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 64
x0 = xindex % 1024
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 32768 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 1024 * (-32 + x1) + 32768 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-32 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_32(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + x3, tmp15, None)
tl.store(out_ptr1 + x3, tmp17, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_33(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_34(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 540800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 65 % 65
x0 = xindex % 65
x2 = xindex // 4225
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 64, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tl.full([XBLOCK], 32, tl.int32)
tmp8 = tmp6 + tmp7
tmp9 = tmp6 < 0
tmp10 = tl.where(tmp9, tmp8, tmp6)
tmp11 = tl.load(in_ptr0 + x0, tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tmp15 = tl.load(in_ptr1 + (tmp14 + 32 * tmp10 + 1024 * x2), tmp5 &
xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + x4, tmp15, xmask)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x0 = xindex % 4096
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 65536 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 32, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-16 + x1) + 65536 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-16 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
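# Final conv bias epilogue: broadcasts the single scalar bias of the
# 1-channel output head over the whole (4, 1, 64, 64) prediction map.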
@triton.jit
def triton_poi_fused_convolution_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
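# AOT driver for the compiled forward pass: unpacks the 83 parameter/input
# tensors, asserts their shapes and strides, and interleaves extern ATen
# convolutions with the fused Triton kernels above, recycling intermediate
# buffers once their lifetimes end.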
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82, primals_83
) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32,), (1,))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_17, (32,), (1,))
assert_size_stride(primals_18, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64,), (1,))
assert_size_stride(primals_21, (64,), (1,))
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_25, (64,), (1,))
assert_size_stride(primals_26, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128,), (1,))
assert_size_stride(primals_29, (128,), (1,))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128,), (1,))
assert_size_stride(primals_32, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_33, (128,), (1,))
assert_size_stride(primals_34, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (256,), (1,))
assert_size_stride(primals_36, (256,), (1,))
assert_size_stride(primals_37, (256,), (1,))
assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (256,), (1,))
assert_size_stride(primals_40, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_41, (256,), (1,))
assert_size_stride(primals_42, (128, 256, 2, 2), (1024, 4, 2, 1))
assert_size_stride(primals_43, (128,), (1,))
assert_size_stride(primals_44, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_45, (128,), (1,))
assert_size_stride(primals_46, (128,), (1,))
assert_size_stride(primals_47, (128,), (1,))
assert_size_stride(primals_48, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_49, (128,), (1,))
assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_51, (128,), (1,))
assert_size_stride(primals_52, (64, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_53, (64,), (1,))
assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_55, (64,), (1,))
assert_size_stride(primals_56, (64,), (1,))
assert_size_stride(primals_57, (64,), (1,))
assert_size_stride(primals_58, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_59, (64,), (1,))
assert_size_stride(primals_60, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_61, (64,), (1,))
assert_size_stride(primals_62, (32, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_63, (32,), (1,))
assert_size_stride(primals_64, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_65, (32,), (1,))
assert_size_stride(primals_66, (32,), (1,))
assert_size_stride(primals_67, (32,), (1,))
assert_size_stride(primals_68, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_69, (32,), (1,))
assert_size_stride(primals_70, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_71, (32,), (1,))
assert_size_stride(primals_72, (16, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_73, (16,), (1,))
assert_size_stride(primals_74, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_75, (16,), (1,))
assert_size_stride(primals_76, (16,), (1,))
assert_size_stride(primals_77, (16,), (1,))
assert_size_stride(primals_78, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_79, (16,), (1,))
assert_size_stride(primals_80, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_81, (16,), (1,))
assert_size_stride(primals_82, (1, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_83, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16),
torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16),
torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16),
torch.float32)
triton_red_fused__native_batch_norm_legit_1[grid(32)](buf1, buf2,
buf3, buf4, 32, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
triton_per_fused__native_batch_norm_legit_2[grid(16)](buf2, buf3,
buf4, buf5, buf6, buf8, 16, 2, XBLOCK=8, num_warps=2, num_stages=1)
buf9 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_3[grid(262144)](buf1,
buf5, buf6, primals_4, primals_5, buf9, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_0[grid(262144)](buf11, primals_7,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = buf4
del buf4
buf13 = buf3
del buf3
buf14 = buf2
del buf2
triton_red_fused__native_batch_norm_legit_1[grid(32)](buf11, buf12,
buf13, buf14, 32, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf15 = buf6
del buf6
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf18 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_2[grid(16)](buf12, buf13,
buf14, buf15, buf16, buf18, 16, 2, XBLOCK=8, num_warps=2,
num_stages=1)
buf19 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_3[grid(262144)](buf11,
buf15, buf16, primals_4, primals_5, buf19, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_0[grid(262144)](buf21, primals_9,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf22 = buf14
del buf14
buf23 = buf13
del buf13
buf24 = buf12
del buf12
triton_red_fused__native_batch_norm_legit_1[grid(32)](buf21, buf22,
buf23, buf24, 32, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf25 = buf16
del buf16
buf26 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf28 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_2[grid(16)](buf22, buf23,
buf24, buf25, buf26, buf28, 16, 2, XBLOCK=8, num_warps=2,
num_stages=1)
buf29 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_3[grid(262144)](buf21,
buf25, buf26, primals_4, primals_5, buf29, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_5
buf30 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.float32)
buf31 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_4[grid(65536)](buf29,
buf30, buf31, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf32 = extern_kernels.convolution(buf30, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_5[grid(131072)](buf33, primals_11,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf34 = reinterpret_tensor(buf24, (1, 32, 1, 1), (32, 1, 32, 32), 0)
del buf24
buf35 = reinterpret_tensor(buf23, (1, 32, 1, 1), (32, 1, 32, 32), 0)
del buf23
buf37 = reinterpret_tensor(buf22, (1, 32, 1, 1), (32, 1, 32, 32), 0)
del buf22
triton_red_fused__native_batch_norm_legit_6[grid(32)](buf33, buf34,
buf35, buf37, 32, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf38 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_7[grid(131072)](buf33,
buf34, buf35, primals_12, primals_13, buf38, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf39 = extern_kernels.convolution(buf38, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf40 = buf39
del buf39
triton_poi_fused_convolution_5[grid(131072)](buf40, primals_15,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf41 = buf35
del buf35
buf42 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
buf44 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_red_fused__native_batch_norm_legit_6[grid(32)](buf40, buf41,
buf42, buf44, 32, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf45 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_7[grid(131072)](buf40,
buf41, buf42, primals_12, primals_13, buf45, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf46 = extern_kernels.convolution(buf45, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf47 = buf46
del buf46
triton_poi_fused_convolution_5[grid(131072)](buf47, primals_17,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf48 = buf42
del buf42
buf49 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
buf51 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_red_fused__native_batch_norm_legit_6[grid(32)](buf47, buf48,
buf49, buf51, 32, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf52 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_7[grid(131072)](buf47,
buf48, buf49, primals_12, primals_13, buf52, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_13
buf53 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf54 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(32768)](buf52,
buf53, buf54, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf55 = extern_kernels.convolution(buf53, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 64, 16, 16), (16384, 256, 16, 1))
buf56 = buf55
del buf55
triton_poi_fused_convolution_9[grid(65536)](buf56, primals_19,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
buf57 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf58 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf60 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_10[grid(64)](buf56, buf57,
buf58, buf60, 64, 1024, num_warps=8, num_stages=1)
buf61 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_11[grid(65536)](buf56,
buf57, buf58, primals_20, primals_21, buf61, 65536, XBLOCK=512,
num_warps=4, num_stages=1)
buf62 = extern_kernels.convolution(buf61, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 64, 16, 16), (16384, 256, 16, 1))
buf63 = buf62
del buf62
triton_poi_fused_convolution_9[grid(65536)](buf63, primals_23,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_23
buf64 = buf58
del buf58
buf65 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf67 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_10[grid(64)](buf63, buf64,
buf65, buf67, 64, 1024, num_warps=8, num_stages=1)
buf68 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_11[grid(65536)](buf63,
buf64, buf65, primals_20, primals_21, buf68, 65536, XBLOCK=512,
num_warps=4, num_stages=1)
buf69 = extern_kernels.convolution(buf68, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf69, (4, 64, 16, 16), (16384, 256, 16, 1))
buf70 = buf69
del buf69
triton_poi_fused_convolution_9[grid(65536)](buf70, primals_25,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_25
buf71 = buf65
del buf65
buf72 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf74 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_10[grid(64)](buf70, buf71,
buf72, buf74, 64, 1024, num_warps=8, num_stages=1)
buf75 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_11[grid(65536)](buf70,
buf71, buf72, primals_20, primals_21, buf75, 65536, XBLOCK=512,
num_warps=4, num_stages=1)
del primals_21
buf76 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
float32)
buf77 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_12[grid(16384)](buf75,
buf76, buf77, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf78 = extern_kernels.convolution(buf76, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78
del buf78
triton_poi_fused_convolution_13[grid(32768)](buf79, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf80 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf81 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf83 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf79,
buf80, buf81, buf83, 128, 256, num_warps=2, num_stages=1)
buf84 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused__native_batch_norm_legit_relu_15[grid(32768)](buf79,
buf80, buf81, primals_28, primals_29, buf84, 32768, XBLOCK=256,
num_warps=4, num_stages=1)
buf85 = extern_kernels.convolution(buf84, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf85, (4, 128, 8, 8), (8192, 64, 8, 1))
buf86 = buf85
del buf85
triton_poi_fused_convolution_13[grid(32768)](buf86, primals_31,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_31
buf87 = buf81
del buf81
buf88 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf90 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf86,
buf87, buf88, buf90, 128, 256, num_warps=2, num_stages=1)
buf91 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused__native_batch_norm_legit_relu_15[grid(32768)](buf86,
buf87, buf88, primals_28, primals_29, buf91, 32768, XBLOCK=256,
num_warps=4, num_stages=1)
buf92 = extern_kernels.convolution(buf91, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 128, 8, 8), (8192, 64, 8, 1))
buf93 = buf92
del buf92
triton_poi_fused_convolution_13[grid(32768)](buf93, primals_33,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_33
buf94 = buf88
del buf88
buf95 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf97 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf93,
buf94, buf95, buf97, 128, 256, num_warps=2, num_stages=1)
buf98 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused__native_batch_norm_legit_relu_15[grid(32768)](buf93,
buf94, buf95, primals_28, primals_29, buf98, 32768, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_29
buf99 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
buf100 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_16[grid(8192)](buf98,
buf99, buf100, 8192, XBLOCK=128, num_warps=4, num_stages=1)
buf101 = extern_kernels.convolution(buf99, primals_34, stride=(1, 1
), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 256, 4, 4), (4096, 16, 4, 1))
buf102 = buf101
del buf101
triton_poi_fused_convolution_17[grid(16384)](buf102, primals_35,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_35
buf103 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf104 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf106 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused__native_batch_norm_legit_18[grid(256)](buf102,
buf103, buf104, buf106, 256, 64, XBLOCK=8, num_warps=4,
num_stages=1)
buf107 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch
.float32)
triton_poi_fused__native_batch_norm_legit_relu_19[grid(16384)](buf102,
buf103, buf104, primals_36, primals_37, buf107, 16384, XBLOCK=
128, num_warps=4, num_stages=1)
buf108 = extern_kernels.convolution(buf107, primals_38, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf108, (4, 256, 4, 4), (4096, 16, 4, 1))
buf109 = buf108
del buf108
triton_poi_fused_convolution_17[grid(16384)](buf109, primals_39,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_39
buf110 = buf104
del buf104
buf111 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf113 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused__native_batch_norm_legit_18[grid(256)](buf109,
buf110, buf111, buf113, 256, 64, XBLOCK=8, num_warps=4,
num_stages=1)
buf114 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch
.float32)
triton_poi_fused__native_batch_norm_legit_relu_19[grid(16384)](buf109,
buf110, buf111, primals_36, primals_37, buf114, 16384, XBLOCK=
128, num_warps=4, num_stages=1)
buf115 = extern_kernels.convolution(buf114, primals_40, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 256, 4, 4), (4096, 16, 4, 1))
buf116 = buf115
del buf115
triton_poi_fused_convolution_17[grid(16384)](buf116, primals_41,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_41
buf117 = buf111
del buf111
buf118 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf120 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused__native_batch_norm_legit_18[grid(256)](buf116,
buf117, buf118, buf120, 256, 64, XBLOCK=8, num_warps=4,
num_stages=1)
buf121 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch
.float32)
buf236 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch
.bool)
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_20[
grid(16384)](buf116, buf117, buf118, primals_36, primals_37,
buf121, buf236, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del buf118
del primals_37
buf122 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_21[grid(8)](buf122, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf123 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
torch.float32)
triton_poi_fused__unsafe_index_constant_pad_nd_22[grid(82944)](buf122,
buf121, buf123, 82944, XBLOCK=512, num_warps=8, num_stages=1)
del buf121
buf124 = extern_kernels.convolution(buf123, primals_42, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_23[grid(65536)](buf98, buf124, primals_43,
buf125, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_43
buf126 = extern_kernels.convolution(buf125, primals_44, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126
del buf126
triton_poi_fused_convolution_13[grid(32768)](buf127, primals_45,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_45
buf128 = buf95
del buf95
buf129 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf131 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf127,
buf128, buf129, buf131, 128, 256, num_warps=2, num_stages=1)
buf132 = buf124
del buf124
triton_poi_fused__native_batch_norm_legit_relu_15[grid(32768)](buf127,
buf128, buf129, primals_46, primals_47, buf132, 32768, XBLOCK=
256, num_warps=4, num_stages=1)
buf133 = extern_kernels.convolution(buf132, primals_48, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf133, (4, 128, 8, 8), (8192, 64, 8, 1))
buf134 = buf133
del buf133
triton_poi_fused_convolution_13[grid(32768)](buf134, primals_49,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_49
buf135 = buf129
del buf129
buf136 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf138 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf134,
buf135, buf136, buf138, 128, 256, num_warps=2, num_stages=1)
buf139 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch
.float32)
triton_poi_fused__native_batch_norm_legit_relu_15[grid(32768)](buf134,
buf135, buf136, primals_46, primals_47, buf139, 32768, XBLOCK=
256, num_warps=4, num_stages=1)
buf140 = extern_kernels.convolution(buf139, primals_50, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140
del buf140
triton_poi_fused_convolution_13[grid(32768)](buf141, primals_51,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_51
buf142 = buf136
del buf136
buf143 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf145 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_14[grid(128)](buf141,
buf142, buf143, buf145, 128, 256, num_warps=2, num_stages=1)
buf146 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch
.float32)
buf235 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch
.bool)
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_24[
grid(32768)](buf141, buf142, buf143, primals_46, primals_47,
buf146, buf235, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del buf143
del primals_47
buf147 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_25[grid(16)](buf147, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf148 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
triton_poi_fused__unsafe_index_constant_pad_nd_26[grid(147968)](buf147,
buf146, buf148, 147968, XBLOCK=512, num_warps=8, num_stages=1)
del buf146
buf149 = extern_kernels.convolution(buf148, primals_52, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 64, 16, 16), (16384, 256, 16, 1))
buf150 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_27[grid(131072)](buf75, buf149, primals_53,
buf150, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_53
buf151 = extern_kernels.convolution(buf150, primals_54, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 64, 16, 16), (16384, 256, 16, 1))
buf152 = buf151
del buf151
triton_poi_fused_convolution_9[grid(65536)](buf152, primals_55,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_55
buf153 = buf72
del buf72
buf154 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf156 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_10[grid(64)](buf152,
buf153, buf154, buf156, 64, 1024, num_warps=8, num_stages=1)
buf157 = buf149
del buf149
triton_poi_fused__native_batch_norm_legit_relu_11[grid(65536)](buf152,
buf153, buf154, primals_56, primals_57, buf157, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
buf158 = extern_kernels.convolution(buf157, primals_58, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf158, (4, 64, 16, 16), (16384, 256, 16, 1))
buf159 = buf158
del buf158
triton_poi_fused_convolution_9[grid(65536)](buf159, primals_59,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_59
buf160 = buf154
del buf154
buf161 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf163 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_10[grid(64)](buf159,
buf160, buf161, buf163, 64, 1024, num_warps=8, num_stages=1)
buf164 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_11[grid(65536)](buf159,
buf160, buf161, primals_56, primals_57, buf164, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
buf165 = extern_kernels.convolution(buf164, primals_60, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 64, 16, 16), (16384, 256, 16, 1))
buf166 = buf165
del buf165
triton_poi_fused_convolution_9[grid(65536)](buf166, primals_61,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_61
buf167 = buf161
del buf161
buf168 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf170 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_10[grid(64)](buf166,
buf167, buf168, buf170, 64, 1024, num_warps=8, num_stages=1)
buf171 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
buf234 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_28[
grid(65536)](buf166, buf167, buf168, primals_56, primals_57,
buf171, buf234, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del buf168
del primals_57
buf172 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_29[grid(32)](buf172, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf173 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.float32)
triton_poi_fused__unsafe_index_constant_pad_nd_30[grid(278784)](buf172,
buf171, buf173, 278784, XBLOCK=512, num_warps=8, num_stages=1)
del buf171
buf174 = extern_kernels.convolution(buf173, primals_62, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf174, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf175 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_31[grid(262144)](buf52, buf174, primals_63,
buf175, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_63
buf176 = extern_kernels.convolution(buf175, primals_64, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf176, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf177 = buf176
del buf176
triton_poi_fused_convolution_5[grid(131072)](buf177, primals_65,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_65
buf178 = buf49
del buf49
buf179 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
buf181 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_red_fused__native_batch_norm_legit_6[grid(32)](buf177,
buf178, buf179, buf181, 32, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf182 = buf174
del buf174
triton_poi_fused__native_batch_norm_legit_relu_7[grid(131072)](buf177,
buf178, buf179, primals_66, primals_67, buf182, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
buf183 = extern_kernels.convolution(buf182, primals_68, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf184 = buf183
del buf183
triton_poi_fused_convolution_5[grid(131072)](buf184, primals_69,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_69
buf185 = buf179
del buf179
buf186 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
buf188 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_red_fused__native_batch_norm_legit_6[grid(32)](buf184,
buf185, buf186, buf188, 32, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf189 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_7[grid(131072)](buf184,
buf185, buf186, primals_66, primals_67, buf189, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
buf190 = extern_kernels.convolution(buf189, primals_70, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf190, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf191 = buf190
del buf190
triton_poi_fused_convolution_5[grid(131072)](buf191, primals_71,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_71
buf192 = buf186
del buf186
buf193 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
buf195 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_red_fused__native_batch_norm_legit_6[grid(32)](buf191,
buf192, buf193, buf195, 32, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf196 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf233 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_32[
grid(131072)](buf191, buf192, buf193, primals_66, primals_67,
buf196, buf233, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_67
buf197 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_33[grid(64)](buf197, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf198 = empty_strided_cuda((4, 32, 65, 65), (135200, 4225, 65, 1),
torch.float32)
triton_poi_fused__unsafe_index_constant_pad_nd_34[grid(540800)](buf197,
buf196, buf198, 540800, XBLOCK=512, num_warps=8, num_stages=1)
del buf196
buf199 = extern_kernels.convolution(buf198, primals_72, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf199, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf200 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_35[grid(524288)](buf29, buf199, primals_73,
buf200, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_73
buf201 = extern_kernels.convolution(buf200, primals_74, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf201, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf202 = buf201
del buf201
triton_poi_fused_convolution_0[grid(262144)](buf202, primals_75,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_75
buf203 = reinterpret_tensor(buf193, (1, 16, 1, 1, 2), (32, 1, 32,
32, 16), 0)
del buf193
buf204 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16),
torch.float32)
buf205 = empty_strided_cuda((1, 16, 1, 1, 2), (32, 1, 32, 32, 16),
torch.float32)
triton_red_fused__native_batch_norm_legit_1[grid(32)](buf202,
buf203, buf204, buf205, 32, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf206 = buf26
del buf26
buf207 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf209 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_2[grid(16)](buf203,
buf204, buf205, buf206, buf207, buf209, 16, 2, XBLOCK=8,
num_warps=2, num_stages=1)
buf210 = buf199
del buf199
triton_poi_fused__native_batch_norm_legit_relu_3[grid(262144)](buf202,
buf206, buf207, primals_76, primals_77, buf210, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
buf211 = extern_kernels.convolution(buf210, primals_78, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf211, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf212 = buf211
del buf211
triton_poi_fused_convolution_0[grid(262144)](buf212, primals_79,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_79
buf213 = buf205
del buf205
buf214 = buf204
del buf204
buf215 = buf203
del buf203
triton_red_fused__native_batch_norm_legit_1[grid(32)](buf212,
buf213, buf214, buf215, 32, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf216 = buf207
del buf207
buf217 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf219 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_2[grid(16)](buf213,
buf214, buf215, buf216, buf217, buf219, 16, 2, XBLOCK=8,
num_warps=2, num_stages=1)
buf220 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_3[grid(262144)](buf212,
buf216, buf217, primals_76, primals_77, buf220, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
buf221 = extern_kernels.convolution(buf220, primals_80, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf221, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf222 = buf221
del buf221
triton_poi_fused_convolution_0[grid(262144)](buf222, primals_81,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_81
buf223 = buf215
del buf215
buf224 = buf214
del buf214
buf225 = buf213
del buf213
triton_red_fused__native_batch_norm_legit_1[grid(32)](buf222,
buf223, buf224, buf225, 32, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf226 = buf217
del buf217
buf227 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf229 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_2[grid(16)](buf223,
buf224, buf225, buf226, buf227, buf229, 16, 2, XBLOCK=8,
num_warps=2, num_stages=1)
del buf223
del buf224
del buf225
buf230 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
triton_poi_fused__native_batch_norm_legit_relu_3[grid(262144)](buf222,
buf226, buf227, primals_76, primals_77, buf230, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del buf227
del primals_77
buf231 = extern_kernels.convolution(buf230, primals_82, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf231, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf232 = buf231
del buf231
triton_poi_fused_convolution_36[grid(16384)](buf232, primals_83,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_83
return (buf232, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, primals_48,
primals_50, primals_52, primals_54, primals_56, primals_58,
primals_60, primals_62, primals_64, primals_66, primals_68,
primals_70, primals_72, primals_74, primals_76, primals_78,
primals_80, primals_82, buf1, reinterpret_tensor(buf8, (16,), (1,),
0), buf9, buf11, reinterpret_tensor(buf18, (16,), (1,), 0), buf19,
buf21, reinterpret_tensor(buf28, (16,), (1,), 0), buf29, buf30,
buf31, buf33, reinterpret_tensor(buf37, (32,), (1,), 0), buf38,
buf40, reinterpret_tensor(buf44, (32,), (1,), 0), buf45, buf47,
reinterpret_tensor(buf51, (32,), (1,), 0), buf52, buf53, buf54,
buf56, reinterpret_tensor(buf60, (64,), (1,), 0), buf61, buf63,
reinterpret_tensor(buf67, (64,), (1,), 0), buf68, buf70,
reinterpret_tensor(buf74, (64,), (1,), 0), buf75, buf76, buf77,
buf79, reinterpret_tensor(buf83, (128,), (1,), 0), buf84, buf86,
reinterpret_tensor(buf90, (128,), (1,), 0), buf91, buf93,
reinterpret_tensor(buf97, (128,), (1,), 0), buf98, buf99, buf100,
buf102, reinterpret_tensor(buf106, (256,), (1,), 0), buf107, buf109,
reinterpret_tensor(buf113, (256,), (1,), 0), buf114, buf116,
reinterpret_tensor(buf120, (256,), (1,), 0), buf122, buf123, buf125,
buf127, reinterpret_tensor(buf131, (128,), (1,), 0), buf132, buf134,
reinterpret_tensor(buf138, (128,), (1,), 0), buf139, buf141,
reinterpret_tensor(buf145, (128,), (1,), 0), buf147, buf148, buf150,
buf152, reinterpret_tensor(buf156, (64,), (1,), 0), buf157, buf159,
reinterpret_tensor(buf163, (64,), (1,), 0), buf164, buf166,
reinterpret_tensor(buf170, (64,), (1,), 0), buf172, buf173, buf175,
buf177, reinterpret_tensor(buf181, (32,), (1,), 0), buf182, buf184,
reinterpret_tensor(buf188, (32,), (1,), 0), buf189, buf191,
reinterpret_tensor(buf195, (32,), (1,), 0), buf197, buf198, buf200,
buf202, reinterpret_tensor(buf209, (16,), (1,), 0), buf210, buf212,
reinterpret_tensor(buf219, (16,), (1,), 0), buf220, buf222,
reinterpret_tensor(buf229, (16,), (1,), 0), buf230,
reinterpret_tensor(buf226, (1, 16, 1, 1), (16, 1, 1, 1), 0),
reinterpret_tensor(buf216, (1, 16, 1, 1), (16, 1, 1, 1), 0),
reinterpret_tensor(buf206, (1, 16, 1, 1), (16, 1, 1, 1), 0), buf233,
reinterpret_tensor(buf192, (1, 32, 1, 1), (32, 1, 1, 1), 0),
reinterpret_tensor(buf185, (1, 32, 1, 1), (32, 1, 1, 1), 0),
reinterpret_tensor(buf178, (1, 32, 1, 1), (32, 1, 1, 1), 0), buf234,
reinterpret_tensor(buf167, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf160, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf153, (1, 64, 1, 1), (64, 1, 1, 1), 0), buf235,
reinterpret_tensor(buf142, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf135, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf128, (1, 128, 1, 1), (128, 1, 1, 1), 0),
buf236, reinterpret_tensor(buf117, (1, 256, 1, 1), (256, 1, 1, 1),
0), reinterpret_tensor(buf110, (1, 256, 1, 1), (256, 1, 1, 1), 0),
reinterpret_tensor(buf103, (1, 256, 1, 1), (256, 1, 1, 1), 0),
reinterpret_tensor(buf94, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf87, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf80, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf71, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf64, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf57, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf48, (1, 32, 1, 1), (32, 1, 1, 1), 0),
reinterpret_tensor(buf41, (1, 32, 1, 1), (32, 1, 1, 1), 0),
reinterpret_tensor(buf34, (1, 32, 1, 1), (32, 1, 1, 1), 0),
reinterpret_tensor(buf25, (1, 16, 1, 1), (16, 1, 1, 1), 0),
reinterpret_tensor(buf15, (1, 16, 1, 1), (16, 1, 1, 1), 0),
reinterpret_tensor(buf5, (1, 16, 1, 1), (16, 1, 1, 1), 0))
def conv3x3(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=True)
def maxpool2x2():
return nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
def concat(xh, xv):
return torch.cat([xh, xv], dim=1)
class UpConv2x2(nn.Module):
def __init__(self, channels):
super(UpConv2x2, self).__init__()
self.conv = nn.Conv2d(channels, channels // 2, kernel_size=2,
stride=1, padding=0, bias=True)
def forward(self, x):
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = F.pad(x, (0, 1, 0, 1))
x = self.conv(x)
return x
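# Minimal shape check (illustrative helper, not part of the original module):
# the nearest upsample doubles H and W, the (0, 1, 0, 1) pad adds one column/row
# on the right/bottom, and the 2x2 valid conv removes it again, so the output
# is exactly (N, C // 2, 2H, 2W).
def _upconv2x2_shape_check():
    up = UpConv2x2(64)
    y = up(torch.rand(1, 64, 8, 8))
    assert y.shape == (1, 32, 16, 16)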
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super(ConvBlock, self).__init__()
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, x):
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
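# Note (illustrative): ConvBlock reuses a single BatchNorm2d instance after
# each of its three convolutions, so all three activations are normalised with
# the same affine parameters rather than one norm per conv.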
class DownConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super(DownConvBlock, self).__init__()
self.maxpool = maxpool2x2()
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, x):
x = self.maxpool(x)
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
class UpConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super(UpConvBlock, self).__init__()
self.upconv = UpConv2x2(in_channels)
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, xh, xv):
"""
Args:
xh: torch Tensor, activations from the same-resolution feature maps (gray arrow in diagram)
xv: torch Tensor, activations from the lower-resolution feature maps (green arrow in diagram)
"""
xv = self.upconv(xv)
x = concat(xh, xv)
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
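# Decoder channel bookkeeping (illustrative, assuming the usual in_channels ==
# 2 * out_channels configuration, e.g. UpConvBlock(256, 128)):
#   xv: (N, 256, H, W) -> upconv -> (N, 128, 2H, 2W)
#   concat with xh: (N, 128, 2H, 2W) -> (N, 256, 2H, 2W)
#   conv1: (N, 256, 2H, 2W) -> (N, 128, 2H, 2W)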
class UNetNew(nn.Module):
def __init__(self):
super(UNetNew, self).__init__()
fs = [16, 32, 64, 128, 256]
self.conv_in = ConvBlock(1, fs[0])
self.dconv1 = DownConvBlock(fs[0], fs[1])
self.dconv2 = DownConvBlock(fs[1], fs[2])
self.dconv3 = DownConvBlock(fs[2], fs[3])
self.dconv4 = DownConvBlock(fs[3], fs[4])
self.uconv1 = UpConvBlock(fs[4], fs[3])
self.uconv2 = UpConvBlock(fs[3], fs[2])
self.uconv3 = UpConvBlock(fs[2], fs[1])
self.uconv4 = UpConvBlock(fs[1], fs[0])
self.conv_out = conv3x3(fs[0], 1)
self._initialize_weights()
def _initialize_weights(self):
conv_modules = [m for m in self.modules() if isinstance(m, nn.Conv2d)]
for m in conv_modules:
n = m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3]
m.weight.data.normal_(0, np.sqrt(2.0 / n))
m.bias.data.zero_()
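# Note (illustrative): the manual init above is the fan-in He scheme - n is
# in_channels * kH * kW and std = sqrt(2 / n), which matches
# nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu').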
def forward(self, input_0):
primals_1 = self.conv_in.conv1.weight
primals_2 = self.conv_in.conv1.bias
primals_6 = self.conv_in.conv2.weight
primals_4 = self.conv_in.conv2.bias
primals_8 = self.conv_in.conv3.weight
primals_5 = self.conv_in.conv3.bias
primals_7 = self.conv_in.norm.weight
primals_9 = self.conv_in.norm.bias
primals_10 = self.dconv1.conv1.weight
primals_11 = self.dconv1.conv1.bias
primals_14 = self.dconv1.conv2.weight
primals_12 = self.dconv1.conv2.bias
primals_16 = self.dconv1.conv3.weight
primals_13 = self.dconv1.conv3.bias
primals_15 = self.dconv1.norm.weight
primals_17 = self.dconv1.norm.bias
primals_18 = self.dconv2.conv1.weight
primals_19 = self.dconv2.conv1.bias
primals_22 = self.dconv2.conv2.weight
primals_20 = self.dconv2.conv2.bias
primals_24 = self.dconv2.conv3.weight
primals_21 = self.dconv2.conv3.bias
primals_23 = self.dconv2.norm.weight
primals_25 = self.dconv2.norm.bias
primals_26 = self.dconv3.conv1.weight
primals_27 = self.dconv3.conv1.bias
primals_30 = self.dconv3.conv2.weight
primals_28 = self.dconv3.conv2.bias
primals_32 = self.dconv3.conv3.weight
primals_29 = self.dconv3.conv3.bias
primals_31 = self.dconv3.norm.weight
primals_33 = self.dconv3.norm.bias
primals_34 = self.dconv4.conv1.weight
primals_35 = self.dconv4.conv1.bias
primals_38 = self.dconv4.conv2.weight
primals_36 = self.dconv4.conv2.bias
primals_40 = self.dconv4.conv3.weight
primals_37 = self.dconv4.conv3.bias
primals_39 = self.dconv4.norm.weight
primals_41 = self.dconv4.norm.bias
primals_42 = self.uconv1.upconv.conv.weight
primals_43 = self.uconv1.upconv.conv.bias
primals_44 = self.uconv1.conv1.weight
primals_45 = self.uconv1.conv1.bias
primals_48 = self.uconv1.conv2.weight
primals_46 = self.uconv1.conv2.bias
primals_50 = self.uconv1.conv3.weight
primals_47 = self.uconv1.conv3.bias
primals_49 = self.uconv1.norm.weight
primals_51 = self.uconv1.norm.bias
primals_52 = self.uconv2.upconv.conv.weight
primals_53 = self.uconv2.upconv.conv.bias
primals_54 = self.uconv2.conv1.weight
primals_55 = self.uconv2.conv1.bias
primals_58 = self.uconv2.conv2.weight
primals_56 = self.uconv2.conv2.bias
primals_60 = self.uconv2.conv3.weight
primals_57 = self.uconv2.conv3.bias
primals_59 = self.uconv2.norm.weight
primals_61 = self.uconv2.norm.bias
primals_62 = self.uconv3.upconv.conv.weight
primals_63 = self.uconv3.upconv.conv.bias
primals_64 = self.uconv3.conv1.weight
primals_65 = self.uconv3.conv1.bias
primals_68 = self.uconv3.conv2.weight
primals_66 = self.uconv3.conv2.bias
primals_70 = self.uconv3.conv3.weight
primals_67 = self.uconv3.conv3.bias
primals_69 = self.uconv3.norm.weight
primals_71 = self.uconv3.norm.bias
primals_72 = self.uconv4.upconv.conv.weight
primals_73 = self.uconv4.upconv.conv.bias
primals_74 = self.uconv4.conv1.weight
primals_75 = self.uconv4.conv1.bias
primals_78 = self.uconv4.conv2.weight
primals_76 = self.uconv4.conv2.bias
primals_80 = self.uconv4.conv3.weight
primals_77 = self.uconv4.conv3.bias
primals_79 = self.uconv4.norm.weight
primals_81 = self.uconv4.norm.bias
primals_82 = self.conv_out.weight
primals_83 = self.conv_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83])
return output[0]
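# Usage sketch (illustrative; assumptions: a CUDA device, since call() launches
# the compiled Triton kernels, and the batch/shape fixed at trace time -
# conv_in is ConvBlock(1, 16), so inputs are single-channel with H and W
# divisible by 16).
def _unet_usage_sketch():
    net = UNetNew().cuda()
    out = net(torch.rand(4, 1, 64, 64, device='cuda'))
    assert out.shape == (4, 1, 64, 64)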
| jabae/detectEM | UNet | false | 7,052 | [
"MIT"
] | 1 | 2d1a5116164d0bed0a8ea767a227d05a8970a448 | https://github.com/jabae/detectEM/tree/2d1a5116164d0bed0a8ea767a227d05a8970a448 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.cuda import *
def conv3x3(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=True)
def maxpool2x2():
return nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
def concat(xh, xv):
return torch.cat([xh, xv], dim=1)
class UpConv2x2(nn.Module):
def __init__(self, channels):
super().__init__()
self.conv = nn.Conv2d(channels, channels // 2, kernel_size=2,
stride=1, padding=0, bias=True)
def forward(self, x):
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = F.pad(x, (0, 1, 0, 1))
x = self.conv(x)
return x
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super().__init__()
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, x):
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
class DownConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super().__init__()
self.maxpool = maxpool2x2()
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, x):
x = self.maxpool(x)
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
class UpConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
"""
Args:
in_channels: number of channels in input (1st) feature map
out_channels: number of channels in output feature maps
"""
super().__init__()
self.upconv = UpConv2x2(in_channels)
self.conv1 = conv3x3(in_channels, out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.norm = nn.BatchNorm2d(out_channels, track_running_stats=False)
def forward(self, xh, xv):
"""
Args:
xh: torch Tensor, activations from the same-resolution feature maps (gray arrow in diagram)
xv: torch Tensor, activations from the lower-resolution feature maps (green arrow in diagram)
"""
xv = self.upconv(xv)
x = concat(xh, xv)
x = F.relu(self.norm(self.conv1(x)))
x = F.relu(self.norm(self.conv2(x)))
x = F.relu(self.norm(self.conv3(x)))
return x
class Model(nn.Module):
def __init__(self):
super().__init__()
fs = [16, 32, 64, 128, 256]
self.conv_in = ConvBlock(1, fs[0])
self.dconv1 = DownConvBlock(fs[0], fs[1])
self.dconv2 = DownConvBlock(fs[1], fs[2])
self.dconv3 = DownConvBlock(fs[2], fs[3])
self.dconv4 = DownConvBlock(fs[3], fs[4])
self.uconv1 = UpConvBlock(fs[4], fs[3])
self.uconv2 = UpConvBlock(fs[3], fs[2])
self.uconv3 = UpConvBlock(fs[2], fs[1])
self.uconv4 = UpConvBlock(fs[1], fs[0])
self.conv_out = conv3x3(fs[0], 1)
self._initialize_weights()
def forward(self, x):
# ... truncated (>4000 chars) for memory efficiency |
SoftEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax_1, exp_1, sub_2
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bg/cbg32drchyezvbfwshguvyopixmzwi2llws7xkhvpdruis76tr2t.py
# Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_probs => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qb/cqbakowgciuzs25w7ws4yr2pp5cb457kjb4ikooghfei7p5xsin5.py
# Topologically Sorted Source Nodes: [softmax, neg, log_probs, mul], Original ATen: [aten._softmax, aten.neg, aten._log_softmax, aten.mul]
# Source node to ATen node mapping:
# log_probs => exp, log, sub_1, sum_1
# mul => mul
# neg => neg
# softmax => div, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_1), kwargs = {})
triton_poi_fused__log_softmax__softmax_mul_neg_2 = async_compile.triton('triton_poi_fused__log_softmax__softmax_mul_neg_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_mul_neg_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (x3), xmask)
tmp11 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = -tmp8
tmp12 = tl_math.exp(tmp11)
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp12 + tmp14
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp15 + tmp17
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tl_math.log(tmp21)
tmp23 = tmp10 - tmp22
tmp24 = tmp9 * tmp23
tl.store(out_ptr0 + (x3), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sv/csvwppyeutu2vnic34436e4qzo4dezwyeghwukd426sewew73kif.py
# Topologically Sorted Source Nodes: [mean, loss], Original ATen: [aten.mean, aten.sum]
# Source node to ATen node mapping:
# loss => sum_3
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [0]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mean,), kwargs = {})
triton_per_fused_mean_sum_3 = async_compile.triton('triton_per_fused_mean_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, neg, log_probs, mul], Original ATen: [aten._softmax, aten.neg, aten._log_softmax, aten.mul]
triton_poi_fused__log_softmax__softmax_mul_neg_2.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mean, loss], Original ATen: [aten.mean, aten.sum]
triton_per_fused_mean_sum_3.run(buf2, buf3, 1, 64, grid=grid(1), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim.lr_scheduler import *
class SoftEntropy(nn.Module):
def __init__(self):
super(SoftEntropy, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
loss = (-F.softmax(targets, dim=1).detach() * log_probs).mean(0).sum()
return loss
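# Minimal eager-mode cross-check (illustrative helper; assumes PyTorch >= 1.10,
# where F.cross_entropy accepts probability targets): for 2-D logits of shape
# (N, C), mean(0).sum() equals (1/N) * sum_n sum_c -p_nc * log(q_nc), i.e. the
# soft-target cross-entropy with mean reduction.
def _soft_entropy_check():
    torch.manual_seed(0)
    x, t = torch.randn(8, 5), torch.randn(8, 5)
    ref = F.cross_entropy(x, F.softmax(t, dim=1))
    assert torch.allclose(ref, SoftEntropy()(x, t), atol=1e-5)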
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + x3, xmask)
tmp11 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = -tmp8
tmp12 = tl_math.exp(tmp11)
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp12 + tmp14
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp15 + tmp17
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tl_math.log(tmp21)
tmp23 = tmp10 - tmp22
tmp24 = tmp9 * tmp23
tl.store(out_ptr0 + x3, tmp24, xmask)
@triton.jit
def triton_per_fused_mean_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
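# Note (illustrative): the reduction kernel above hard-codes the traced shape.
# The four loads at offsets 0, 64, 128 and 192 stride over the batch dimension
# of the (4, 4, 4, 4) buffer, and 4.0 is the batch size, so the kernel computes
# mean(0) followed by the final sum in a single pass.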
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(256)](buf0,
buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_mean_sum_3[grid(1)](buf2, buf3, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del buf2
return buf3,
class SoftEntropyNew(nn.Module):
def __init__(self):
super(SoftEntropyNew, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| knifefield/uda-reid-contest | SoftEntropy | false | 7,053 | [
"MIT"
] | 1 | 8b642cb4c5e63bb1dbfb07d0ac6dacdc26729e91 | https://github.com/knifefield/uda-reid-contest/tree/8b642cb4c5e63bb1dbfb07d0ac6dacdc26729e91 | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim.lr_scheduler import *
class Model(nn.Module):
def __init__(self):
super().__init__()
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
loss = (-F.softmax(targets, dim=1).detach() * log_probs).mean(0).sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AugCNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ur/curzudn4ai4j7lgrmbqwy57jpcw3gylwk4nkg6jt7lqh577w5ku7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 49152, grid=grid(49152), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
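# Worked example of the TF-style 'SAME' arithmetic above (illustrative): for
# input_size = 64, filter_size = 3, stride = 1, dilation = 1:
#   effective_filter_size = 3
#   out_size = (64 + 1 - 1) // 1 = 64
#   total_padding = max(0, (64 - 1) * 1 + 3 - 64) = 2, additional_padding = 0
# so F.conv2d runs with symmetric padding = 1 and no extra F.pad is needed.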
class AugCNN(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNN, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, obs):
return self.aug(obs)
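# Shape-preservation check (illustrative helper): 'SAME' padding keeps the
# spatial size, so the augmentation maps inputs to same-shaped outputs.
def _augcnn_shape_check():
    aug = AugCNN()
    assert aug(torch.rand(2, 3, 32, 32)).shape == (2, 3, 32, 32)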
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
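# Note (illustrative): this kernel only adds the bias after the external
# convolution (extern_kernels.convolution) - x1 = xindex // 4096 % 3 recovers
# the channel index, since each of the 3 channels holds 64 * 64 = 4096
# contiguous elements.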
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class AugCNNNew(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNNNew, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, input_0):
primals_2 = self.aug.weight
primals_3 = self.aug.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| krg-nandu/prj-taxRL | AugCNN | false | 7,054 | [
"MIT"
] | 1 | be65d004c196aff73714dcb346c814ae97db30e2 | https://github.com/krg-nandu/prj-taxRL/tree/be65d004c196aff73714dcb346c814ae97db30e2 | import torch
import torch.nn as nn
from torch.nn import functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class Model(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super().__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, obs):
return self.aug(obs)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
PSNRLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bz/cbz4jfqnivmqo3kagpgcw2e6ojdrfmyv6p6tqz7c2t57rwabulea.py
# Topologically Sorted Source Nodes: [mul, mse_val, truediv, log10, mul_1], Original ATen: [aten.mul, aten.mse_loss, aten.div, aten.log10]
# Source node to ATen node mapping:
# log10 => log10
# mse_val => mean, pow_1, sub
# mul => full_default_1
# mul_1 => mul_1
# truediv => div
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 16), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%full_default_1, %mean), kwargs = {})
# %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%div,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, 10), kwargs = {})
triton_per_fused_div_log10_mse_loss_mul_0 = async_compile.triton('triton_per_fused_div_log10_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_log10_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 16.0
tmp10 = tmp9 / tmp8
tmp11 = libdevice.log10(tmp10)
tmp12 = 10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
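# Note (illustrative): the constants baked into the kernel above come from the
# traced configuration - 256.0 is the element count of the (4, 4, 4, 4) inputs,
# and 16.0 is max_val ** 2 for the max_val = 4 supplied at initialisation.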
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, mse_val, truediv, log10, mul_1], Original ATen: [aten.mul, aten.mse_loss, aten.div, aten.log10]
stream0 = get_raw_stream(0)
triton_per_fused_div_log10_mse_loss_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float'
) ->torch.Tensor:
"""Function that computes PSNR
See :class:`~kornia.losses.PSNR` for details.
"""
if not torch.is_tensor(input) or not torch.is_tensor(target):
raise TypeError(
f'Expected 2 torch tensors but got {type(input)} and {type(target)}'
)
if input.shape != target.shape:
raise TypeError(
f'Expected tensors of equal shapes, but got {input.shape} and {target.shape}'
)
mse_val = mse_loss(input, target, reduction='mean')
max_val_tensor: 'torch.Tensor' = torch.tensor(max_val)
return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)
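# Numeric check reproducing the docstring example below (illustrative helper):
# mse = (1.2 - 1) ** 2 = 0.04, so 10 * log10(2 ** 2 / 0.04) = 20.
def _psnr_example():
    val = psnr_loss(torch.ones(1), 1.2 * torch.ones(1), 2.0)
    assert torch.allclose(val, torch.tensor(20.0), atol=1e-4)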
class PSNRLoss(nn.Module):
"""Creates a criterion that calculates the PSNR between 2 images. Given an m x n image,
.. math::
\\text{MSE}(I,T) = \\frac{1}{m\\,n}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
Arguments:
max_val (float): Maximum value of input
Shape:
- input: :math:`(*)`
- approximation: :math:`(*)` same shape as input
- output: :math:`()` a scalar
Examples:
>>> kornia.losses.psnr(torch.ones(1), 1.2*torch.ones(1), 2)
tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)
reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
def __init__(self, max_val: 'float') ->None:
super(PSNRLoss, self).__init__()
self.max_val = max_val
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
return psnr_loss(input, target, self.max_val)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'max_val': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.functional import mse_loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 16.0
tmp10 = tmp9 / tmp8
tmp11 = libdevice.log10(tmp10)
tmp12 = 10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_log10_mse_loss_mul_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float'
) ->torch.Tensor:
"""Function that computes PSNR
See :class:`~kornia.losses.PSNR` for details.
"""
if not torch.is_tensor(input) or not torch.is_tensor(target):
raise TypeError(
f'Expected 2 torch tensors but got {type(input)} and {type(target)}'
)
if input.shape != target.shape:
raise TypeError(
f'Expected tensors of equal shapes, but got {input.shape} and {target.shape}'
)
mse_val = mse_loss(input, target, reduction='mean')
max_val_tensor: 'torch.Tensor' = torch.tensor(max_val)
return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)
class PSNRLossNew(nn.Module):
"""Creates a criterion that calculates the PSNR between 2 images. Given an m x n image,
.. math::
\\text{MSE}(I,T) = \\frac{1}{m\\,n}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
Arguments:
max_val (float): Maximum value of input
Shape:
- input: :math:`(*)`
- approximation: :math:`(*)` same shape as input
- output: :math:`()` a scalar
Examples:
>>> kornia.losses.psnr(torch.ones(1), 1.2*torch.ones(1), 2)
tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)
reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
def __init__(self, max_val: 'float') ->None:
super(PSNRLossNew, self).__init__()
self.max_val = max_val
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kshitij12345/kornia | PSNRLoss | false | 7,055 | [
"Apache-2.0"
] | 1 | 4fcc9a570dfa38f67ec812c8fdfabe434b3e466e | https://github.com/kshitij12345/kornia/tree/4fcc9a570dfa38f67ec812c8fdfabe434b3e466e | import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float'
) ->torch.Tensor:
"""Function that computes PSNR
See :class:`~kornia.losses.PSNR` for details.
"""
if not torch.is_tensor(input) or not torch.is_tensor(target):
raise TypeError(
f'Expected 2 torch tensors but got {type(input)} and {type(target)}'
)
if input.shape != target.shape:
raise TypeError(
f'Expected tensors of equal shapes, but got {input.shape} and {target.shape}'
)
mse_val = mse_loss(input, target, reduction='mean')
max_val_tensor: 'torch.Tensor' = torch.tensor(max_val)
return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)
class Model(nn.Module):
"""Creates a criterion that calculates the PSNR between 2 images. Given an m x n image,
.. math::
\\text{MSE}(I,T) = \\frac{1}{m\\,n}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
Arguments:
max_val (float): Maximum value of input
Shape:
- input: :math:`(*)`
- approximation: :math:`(*)` same shape as input
- output: :math:`()` a scalar
Examples:
>>> kornia.losses.psnr(torch.ones(1), 1.2*torch.ones(1), 2)
tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)
reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
def __init__(self, max_val: 'float') ->None:
super().__init__()
self.max_val = max_val
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
return psnr_loss(input, target, self.max_val)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SeparatedLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rc/crctfeth7b4kpmchbcpls75t2tcus6ydvzhpx7v2oob5343reeyh.py
# Topologically Sorted Source Nodes: [loss_disease_risk], Original ATen: [aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# loss_disease_risk => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %select), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %select), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp3 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2u/c2uxl2cl3y5rphtmxmzkjuv6zfm2xdp3otpslljz6ia2xslr77wx.py
# Topologically Sorted Source Nodes: [loss_disease_risk, mul_5, mul, probas, sub, pow_1, mul_1, bce_loss, mul_2, sub_1, pow_2, mul_3, mul_4, loss, loss_1, mul_6, add_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.sigmoid, aten.rsub, aten.pow, aten.add, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# bce_loss => abs_2, exp_1, full_default_1, log1p_1, minimum_1, mul_1, neg_1, sub_3, sub_4, sub_5
# loss => add
# loss_1 => mean_1
# loss_disease_risk => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# mul => mul_2
# mul_1 => mul_3
# mul_2 => mul_4
# mul_3 => mul_5
# mul_4 => mul_6
# mul_5 => mul_7
# mul_6 => mul_8
# pow_1 => pow_1
# pow_2 => pow_2
# probas => sigmoid
# sub => sub_6
# sub_1 => sub_7
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %select), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %select), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%select,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_6, 0.25), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_4,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_6, 2.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %pow_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %slice_6), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %slice_4), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %slice_4), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%slice_4,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_1, %log1p_1), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %slice_6), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %pow_2), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %sub_5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_6), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %mul_8), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_1 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 48
r1 = (rindex // 48)
tmp0 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), rmask, other=0.0)
tmp3 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), rmask, other=0.0)
tmp28 = tl.load(in_out_ptr0 + (0))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, 1])
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp5 - tmp0
tmp10 = tmp9 * tmp3
tmp11 = 0.0
tmp12 = triton_helpers.minimum(tmp11, tmp3)
tmp13 = tl_math.abs(tmp3)
tmp14 = -tmp13
tmp15 = tl_math.exp(tmp14)
tmp16 = libdevice.log1p(tmp15)
tmp17 = tmp12 - tmp16
tmp18 = tmp10 - tmp17
tmp19 = tmp8 * tmp18
tmp20 = tmp4 * tmp4
tmp21 = tmp9 * tmp20
tmp22 = tmp21 * tmp18
tmp23 = tmp19 + tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.where(rmask, tmp24, 0)
tmp27 = tl.sum(tmp26, 1)[:, None]
tmp30 = 64.0
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp5
tmp33 = 192.0
tmp34 = tmp27 / tmp33
tmp35 = tmp34 * tmp5
tmp36 = tmp32 + tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp36, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [loss_disease_risk], Original ATen: [aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0.run(arg1_1, arg0_1, buf0, 1, 64, grid=grid(1), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss_disease_risk, mul_5, mul, probas, sub, pow_1, mul_1, bce_loss, mul_2, sub_1, pow_2, mul_3, mul_4, loss, loss_1, mul_6, add_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.sigmoid, aten.rsub, aten.pow, aten.add, aten.mean]
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_1.run(buf2, arg1_1, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BCEFocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, preds, targets):
bce_loss = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
probas = torch.sigmoid(preds)
loss = targets * self.alpha * (1.0 - probas
) ** self.gamma * bce_loss + (1.0 - targets
) * probas ** self.gamma * bce_loss
loss = loss.mean()
return loss
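# Worked numbers (hedged, with the default alpha=0.25, gamma=2.0): for a
# positive target at proba 0.9, bce = -log(0.9) ~= 0.105 and the modulator
# 0.25 * (1 - 0.9)**2 = 0.0025 shrinks it to ~2.6e-4; at proba 0.1 the same
# target yields 0.25 * 0.9**2 * 2.303 ~= 0.466, so hard examples dominate
# the mean.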
class SeparatedLoss(nn.Module):
def __init__(self, loss_disease_risk='BCEWithLogitsLoss', loss_disease=
'BCEFocalLoss', weights=[1.0, 1.0]):
super().__init__()
if loss_disease_risk == 'BCEWithLogitsLoss':
self.loss_disease_risk = nn.BCEWithLogitsLoss()
elif loss_disease_risk == 'BCEFocalLoss':
self.loss_disease_risk = BCEFocalLoss()
else:
raise NotImplementedError
if loss_disease == 'BCEWithLogitsLoss':
self.loss_disease = nn.BCEWithLogitsLoss()
elif loss_disease == 'BCEFocalLoss':
            self.loss_disease = BCEFocalLoss()
        else:
            raise NotImplementedError
self.weights = weights
def forward(self, preds, targets):
risk_pred = preds[:, 0]
risk_targ = targets[:, 0]
disease_pred = preds[:, 1:]
disease_targ = targets[:, 1:]
loss_disease_risk = self.loss_disease_risk(risk_pred, risk_targ)
loss_disease = self.loss_disease(disease_pred, disease_targ)
return self.weights[0] * loss_disease_risk + self.weights[1
] * loss_disease
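# Hedged shape sketch (shapes illustrative): for preds/targets of shape
# (B, C, H, W), channel 0 is the disease-risk logit fed to BCEWithLogitsLoss
# and channels 1..C-1 are disease logits fed to BCEFocalLoss; the default
# weights combine them as 1.0 * loss_disease_risk + 1.0 * loss_disease.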
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
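# Kernel 0: BCE-with-logits over channel 0 only -- the offset r0 + 64 * r1
# with r0 < 16 selects the 16-element risk slice of each of the 4 samples
# (in_ptr0 = targets, in_ptr1 = logits). It uses the numerically stable form
# (1 - t) * x - (min(0, x) - log1p(exp(-|x|))) and stores the raw 64-element
# sum, which kernel 1 normalizes.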
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
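# Kernel 1: focal BCE over channels 1..3 (offset 16 + r0 + 64 * r1, 48
# elements per sample, rnumel = 192), then fuses the final combination
# in-place: out = sum0 / 64 * 1.0 + sum1 / 192 * 1.0, matching the default
# weights = [1.0, 1.0].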
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_1(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 48
r1 = rindex // 48
tmp0 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), rmask, other=0.0)
tmp3 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), rmask, other=0.0)
tmp28 = tl.load(in_out_ptr0 + 0)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, 1])
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp5 - tmp0
tmp10 = tmp9 * tmp3
tmp11 = 0.0
tmp12 = triton_helpers.minimum(tmp11, tmp3)
tmp13 = tl_math.abs(tmp3)
tmp14 = -tmp13
tmp15 = tl_math.exp(tmp14)
tmp16 = libdevice.log1p(tmp15)
tmp17 = tmp12 - tmp16
tmp18 = tmp10 - tmp17
tmp19 = tmp8 * tmp18
tmp20 = tmp4 * tmp4
tmp21 = tmp9 * tmp20
tmp22 = tmp21 * tmp18
tmp23 = tmp19 + tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.where(rmask, tmp24, 0)
tmp27 = tl.sum(tmp26, 1)[:, None]
tmp30 = 64.0
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp5
tmp33 = 192.0
tmp34 = tmp27 / tmp33
tmp35 = tmp34 * tmp5
tmp36 = tmp32 + tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg1_1,
arg0_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf2 = buf0
del buf0
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_1[
grid(1)](buf2, arg1_1, arg0_1, 1, 192, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf2,
class BCEFocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, preds, targets):
bce_loss = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
probas = torch.sigmoid(preds)
loss = targets * self.alpha * (1.0 - probas
) ** self.gamma * bce_loss + (1.0 - targets
) * probas ** self.gamma * bce_loss
loss = loss.mean()
return loss
class SeparatedLossNew(nn.Module):
def __init__(self, loss_disease_risk='BCEWithLogitsLoss', loss_disease=
'BCEFocalLoss', weights=[1.0, 1.0]):
super().__init__()
if loss_disease_risk == 'BCEWithLogitsLoss':
self.loss_disease_risk = nn.BCEWithLogitsLoss()
elif loss_disease_risk == 'BCEFocalLoss':
self.loss_disease_risk = BCEFocalLoss()
else:
raise NotImplementedError
if loss_disease == 'BCEWithLogitsLoss':
self.loss_disease = nn.BCEWithLogitsLoss()
elif loss_disease == 'BCEFocalLoss':
            self.loss_disease = BCEFocalLoss()
        else:
            raise NotImplementedError
self.weights = weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| koukyo1994/riadd-competition | SeparatedLoss | false | 7,056 | [
"MIT"
] | 1 | 0e399305aef21d40125cadccee55be1f0b310216 | https://github.com/koukyo1994/riadd-competition/tree/0e399305aef21d40125cadccee55be1f0b310216 | import torch
import torch.nn as nn
class BCEFocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, preds, targets):
bce_loss = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
probas = torch.sigmoid(preds)
loss = targets * self.alpha * (1.0 - probas
) ** self.gamma * bce_loss + (1.0 - targets
) * probas ** self.gamma * bce_loss
loss = loss.mean()
return loss
class Model(nn.Module):
def __init__(self, loss_disease_risk='BCEWithLogitsLoss', loss_disease=
'BCEFocalLoss', weights=[1.0, 1.0]):
super().__init__()
if loss_disease_risk == 'BCEWithLogitsLoss':
self.loss_disease_risk = nn.BCEWithLogitsLoss()
elif loss_disease_risk == 'BCEFocalLoss':
self.loss_disease_risk = BCEFocalLoss()
else:
raise NotImplementedError
if loss_disease == 'BCEWithLogitsLoss':
self.loss_disease = nn.BCEWithLogitsLoss()
elif loss_disease == 'BCEFocalLoss':
            self.loss_disease = BCEFocalLoss()
        else:
            raise NotImplementedError
self.weights = weights
def forward(self, preds, targets):
risk_pred = preds[:, 0]
risk_targ = targets[:, 0]
disease_pred = preds[:, 1:]
disease_targ = targets[:, 1:]
loss_disease_risk = self.loss_disease_risk(risk_pred, risk_targ)
loss_disease = self.loss_disease(disease_pred, disease_targ)
return self.weights[0] * loss_disease_risk + self.weights[1
] * loss_disease
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MyNet | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py
# Topologically Sorted Source Nodes: [elu], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# elu => expm1, gt, mul, mul_1, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [elu], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.testing._internal.common_utils import *
class MyNet(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, x):
return self.elu(x)
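# ELU with the default alpha = 1.0: f(x) = x for x > 0 and expm1(x) = e**x - 1
# otherwise; e.g. f(-1.0) ~= -0.6321, matching the expm1 branch of the fused
# Triton kernel in this record.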
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.testing._internal.common_utils import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
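# Elementwise ELU kernel: where(x > 0, x, expm1(x)); with the default
# alpha = 1.0 the multiplications by tmp3 = 1.0 are identity scalings kept
# from the decomposed aten.elu graph.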
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MyNetNew(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| LexcaliburR/notebook | MyNet | false | 7,057 | [
"MIT"
] | 1 | 84a8f3801dff20d07caa0ed2584e722656fb5726 | https://github.com/LexcaliburR/notebook/tree/84a8f3801dff20d07caa0ed2584e722656fb5726 | import torch
import torch.nn as nn
from torch.testing._internal.common_utils import *
class Model(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, x):
return self.elu(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bn/cbnw4uewpv7yyudtpcdoczlgvlxmpcnzej45i7ca2geg5hg5zbwg.py
# Topologically Sorted Source Nodes: [dist, clamp, dist_1, eq, mat_sim, sub, mul, add_1, sort, mul_1, add_2, sort_1], Original ATen: [aten.add, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.mul, aten.sort]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# clamp => clamp_min
# dist => add
# dist_1 => sqrt
# eq => eq
# mat_sim => convert_element_type
# mul => mul
# mul_1 => mul_1
# sort => sort
# sort_1 => sort_1
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %permute), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %add), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_tensor, 1e-12), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%expand_2, %permute_2), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -9999999.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_1, 1, True), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 9999999.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_1), kwargs = {})
# %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_2, 1), kwargs = {})
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0 = async_compile.triton('triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0)
tmp29 = tl.load(in_ptr1 + (x0 + (4*r1)), xmask, other=0.0)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp11 + tmp22
tmp24 = tmp0 + tmp23
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = libdevice.sqrt(tmp26)
tmp30 = tmp28 == tmp29
tmp31 = tmp30.to(tl.float32)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = -9999999.0
tmp35 = tmp33 * tmp34
tmp36 = tmp27 + tmp35
tmp37 = r1
tmp38 = tmp37.to(tl.int16)
tmp39 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp40 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41, tmp42, = triton_helpers.sort_with_index(tmp39, tmp40, None, 1, stable=False, descending=True)
tmp43 = 9999999.0
tmp44 = tmp31 * tmp43
tmp45 = tmp27 + tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp47, tmp48, = triton_helpers.sort_with_index(tmp46, tmp40, None, 1, stable=False, descending=False)
tl.store(in_out_ptr0 + (r1 + (4*x0)), tmp24, xmask)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp41, xmask)
tl.store(out_ptr1 + (r1 + (4*x0)), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gf/cgfachfggtjixs7ghy3nu6e2mp4ksruzavt42jz66ogrq5k6bhsm.py
# Topologically Sorted Source Nodes: [loss, gt, sum_3, mul_2, prec], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean, aten.gt, aten.sum, aten.div]
# Source node to ATen node mapping:
# gt => gt
# loss => add_3, clamp_min_1, full_default, mean, mul_2, sub_1
# mul_2 => mul_3
# prec => div
# sum_3 => sum_3
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, %select), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %sub_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%mul_2, 4.0), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min_1,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%select_1, %select), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%gt,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, 4), kwargs = {})
triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1 = async_compile.triton('triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = -1.0
tmp4 = tmp3 * tmp2
tmp5 = 4.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp0 > tmp1
tmp13 = tmp12.to(tl.int64)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp16.to(tl.float32)
tmp18 = 1.0
tmp19 = tmp17 * tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tmp22 = tmp11 / tmp5
tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dist, clamp, dist_1, eq, mat_sim, sub, mul, add_1, sort, mul_1, add_2, sort_1], Original ATen: [aten.add, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.mul, aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0.run(buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf9 = empty_strided_cuda((), (), torch.float32)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [loss, gt, sum_3, mul_2, prec], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean, aten.gt, aten.sum, aten.div]
triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1.run(buf8, buf4, buf2, buf9, 1, 4, grid=grid(1), stream=stream0)
del buf2
del buf4
return (buf8, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim.lr_scheduler import *
def _batch_hard(mat_distance, mat_similarity, indice=False):
    sorted_mat_distance, positive_indices = torch.sort(
        mat_distance - 9999999.0 * (1 - mat_similarity), dim=1, descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance +
9999999.0 * mat_similarity, dim=1, descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if indice:
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
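# Hedged sketch of the +/-9999999.0 masking trick (demo helper is
# illustrative, not part of the original module): same-identity entries
# survive the descending sort, so column 0 is the hardest positive, while
# cross-identity entries survive the ascending sort, giving the hardest
# negative.
def _batch_hard_demo():
    dist = torch.tensor([[0.0, 1.0, 5.0], [1.0, 0.0, 4.0], [5.0, 4.0, 0.0]])
    sim = torch.tensor([[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    return _batch_hard(dist, sim)  # hard_p = [1., 1., 0.], hard_n = [5., 4., 4.]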
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # keyword beta/alpha; the positional form was removed from PyTorch
dist = dist.clamp(min=1e-12).sqrt()
return dist
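# Identity used above: ||x_i - y_j||**2 = ||x_i||**2 + ||y_j||**2 - 2 * x_i . y_j;
# xx + yy holds the squared-norm terms and addmm_ folds in the -2 * x @ y.t()
# Gram term before the clamp-and-sqrt.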
class TripletLoss(nn.Module):
"""
    Computes the triplet loss with batch-hard mining.
    See 'In Defense of the Triplet Loss for Person Re-Identification' for details.
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, emb, label):
if self.normalize_feature:
emb = F.normalize(emb)
mat_dist = euclidean_dist(emb, emb)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)
assert dist_an.size(0) == dist_ap.size(0)
y = torch.ones_like(dist_ap)
loss = self.margin_loss(dist_an, dist_ap, y)
prec = (dist_an.data > dist_ap.data).sum() * 1.0 / y.size(0)
return loss, prec
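# With y = 1, nn.MarginRankingLoss reduces to
# mean(clamp(margin - (dist_an - dist_ap), min=0)), i.e. the standard triplet
# hinge pushing each hardest negative at least `margin` beyond its hardest
# positive; prec is the fraction of rows already satisfying dist_an > dist_ap.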
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
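# Kernel 0: fuses the tail of euclidean_dist (combines the Gram matrix from
# the mm call with the per-row/per-column squared norms, clamps at 1e-12,
# takes sqrt) with both masked row-wise sorts, so column 0 of out_ptr0 holds
# each row's hardest positive distance and column 0 of out_ptr1 its hardest
# negative distance.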
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp29 = tl.load(in_ptr1 + (x0 + 4 * r1), xmask, other=0.0)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp11 + tmp22
tmp24 = tmp0 + tmp23
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = libdevice.sqrt(tmp26)
tmp30 = tmp28 == tmp29
tmp31 = tmp30.to(tl.float32)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = -9999999.0
tmp35 = tmp33 * tmp34
tmp36 = tmp27 + tmp35
tmp37 = r1
tmp38 = tmp37.to(tl.int16)
tmp39 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp40 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41, _tmp42 = triton_helpers.sort_with_index(tmp39, tmp40, None, 1,
stable=False, descending=True)
tmp43 = 9999999.0
tmp44 = tmp31 * tmp43
tmp45 = tmp27 + tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp47, _tmp48 = triton_helpers.sort_with_index(tmp46, tmp40, None, 1,
stable=False, descending=False)
tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp24, xmask)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp41, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp47, xmask)
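# Kernel 1: stride-4 loads pick column 0 of each sorted matrix (hard_n from
# in_ptr0, hard_p from in_ptr1) and fuse the margin ranking loss
# mean(clamp(margin - (hard_n - hard_p), min=0)) with the precision
# (hard_n > hard_p).sum() / N, for N = 4 and margin = 4.0.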
@triton.jit
def triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = -1.0
tmp4 = tmp3 * tmp2
tmp5 = 4.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp0 > tmp1
tmp13 = tmp12.to(tl.int64)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp16.to(tl.float32)
tmp18 = 1.0
tmp19 = tmp17 * tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tmp22 = tmp11 / tmp5
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0[grid(4)](
buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf9 = empty_strided_cuda((), (), torch.float32)
buf8 = buf6
del buf6
triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1[grid(1)](
buf8, buf4, buf2, buf9, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf4
return buf8, buf9
def _batch_hard(mat_distance, mat_similarity, indice=False):
    sorted_mat_distance, positive_indices = torch.sort(
        mat_distance - 9999999.0 * (1 - mat_similarity), dim=1, descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance +
9999999.0 * mat_similarity, dim=1, descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if indice:
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # keyword beta/alpha; the positional form was removed from PyTorch
dist = dist.clamp(min=1e-12).sqrt()
return dist
class TripletLossNew(nn.Module):
"""
    Computes the triplet loss with batch-hard mining.
    See 'In Defense of the Triplet Loss for Person Re-Identification' for details.
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLossNew, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
| knifefield/uda-reid-contest | TripletLoss | false | 7,058 | [
"MIT"
] | 1 | 8b642cb4c5e63bb1dbfb07d0ac6dacdc26729e91 | https://github.com/knifefield/uda-reid-contest/tree/8b642cb4c5e63bb1dbfb07d0ac6dacdc26729e91 | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim.lr_scheduler import *
def _batch_hard(mat_distance, mat_similarity, indice=False):
    sorted_mat_distance, positive_indices = torch.sort(
        mat_distance - 9999999.0 * (1 - mat_similarity), dim=1, descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance +
9999999.0 * mat_similarity, dim=1, descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if indice:
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # keyword beta/alpha; the positional form was removed from PyTorch
dist = dist.clamp(min=1e-12).sqrt()
return dist
class Model(nn.Module):
"""
    Computes the triplet loss with batch-hard mining.
    See 'In Defense of the Triplet Loss for Person Re-Identification' for details.
"""
def __init__(self, margin, normalize_feature=False):
super().__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, emb, label):
if self.normalize_feature:
emb = F.normalize(emb)
mat_dist = euclidean_dist(emb, emb)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)
assert dist_an.size(0) == dist_ap.size(0)
y = torch.ones_like(dist_ap)
loss = self.margin_loss(dist_an, dist_ap, y)
prec = (dist_an.data > dist_ap.data).sum() * 1.0 / y.size(0)
return loss, prec
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
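if __name__ == '__main__':
    # Hedged usage sketch (not from the original repo): shapes and the
    # margin value below are illustrative only.
    loss_fn = Model(margin=0.3)
    emb = torch.rand(8, 16)                      # 8 samples, 16-dim features
    label = torch.randint(0, 2, (8,)).float()    # binary identity labels
    loss, prec = loss_fn(emb, label)
    print(loss.item(), prec.item())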
|
BinaryFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/w4/cw47jg5ijuae73db7wzukcgsbb5imwlodhvgae5q6dbbjiyvrl47.py
# Topologically Sorted Source Nodes: [sub_1, neg_1, pr, pow_2, mul_3, mul_4, sub_2, log_1, loss_0, neg, sub, pow_1, mul, mul_1, log, loss_1, loss], Original ATen: [aten.rsub, aten.neg, aten.clamp, aten.pow, aten.mul, aten.log, aten.add]
# Source node to ATen node mapping:
# log => log
# log_1 => log_1
# loss => add
# loss_0 => mul_5
# loss_1 => mul_2
# mul => mul
# mul_1 => mul_1
# mul_3 => mul_3
# mul_4 => mul_4
# neg => neg
# neg_1 => neg_1
# pow_1 => pow_1
# pow_2 => pow_2
# pr => clamp_max, clamp_min
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_1,), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-07), kwargs = {})
# %clamp_max : [num_users=4] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.9999999), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_max, 2.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 0.75), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %mul_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %log_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.25), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %mul), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_max,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %mul_2), kwargs = {})
triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_0 = async_compile.triton('triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = -tmp2
tmp5 = 1e-07
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.9999999
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = tmp8 * tmp8
tmp10 = 0.75
tmp11 = tmp9 * tmp10
tmp12 = tmp3 * tmp11
tmp13 = tmp1 - tmp8
tmp14 = tl_math.log(tmp13)
tmp15 = tmp12 * tmp14
tmp16 = -tmp0
tmp17 = tmp13 * tmp13
tmp18 = 0.25
tmp19 = tmp17 * tmp18
tmp20 = tmp16 * tmp19
tmp21 = tl_math.log(tmp8)
tmp22 = tmp20 * tmp21
tmp23 = tmp15 + tmp22
tl.store(out_ptr0 + (x0), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub_1, neg_1, pr, pow_2, mul_3, mul_4, sub_2, log_1, loss_0, neg, sub, pow_1, mul, mul_1, log, loss_1, loss], Original ATen: [aten.rsub, aten.neg, aten.clamp, aten.pow, aten.mul, aten.log, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class BinaryFocalLoss(torch.nn.Module):
""" from https://github.com/qubvel/segmentation_models"""
def __init__(self, gamma=2.0, alpha=0.25, eps=1e-07):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.eps = eps
def forward(self, pr, gt):
pr = torch.clamp(pr, self.eps, 1 - self.eps)
loss_1 = -gt * (self.alpha * torch.pow(1 - pr, self.gamma)
) * torch.log(pr)
loss_0 = -(1 - gt) * ((1 - self.alpha) * torch.pow(pr, self.gamma)
) * torch.log(1 - pr)
loss = loss_0 + loss_1
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
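if __name__ == '__main__':
    # Hedged illustration (not from the original repo): the
    # (1 - pr) ** gamma factor down-weights confident correct
    # predictions, so the loss is dominated by hard examples.
    loss_fn = BinaryFocalLoss()
    gt = torch.ones(1)
    print(loss_fn(torch.tensor([0.9]), gt))  # ~2.6e-4, easy example
    print(loss_fn(torch.tensor([0.1]), gt))  # ~0.47, hard example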
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
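    # Elementwise focal-loss kernel (comment added for clarity): the
    # literals below are the module defaults constant-folded at trace
    # time -- 1e-07 / 0.9999999 implement the eps clamp, squaring
    # realises gamma = 2, and 0.25 / 0.75 are alpha and (1 - alpha).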
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = -tmp2
tmp5 = 1e-07
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.9999999
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = tmp8 * tmp8
tmp10 = 0.75
tmp11 = tmp9 * tmp10
tmp12 = tmp3 * tmp11
tmp13 = tmp1 - tmp8
tmp14 = tl_math.log(tmp13)
tmp15 = tmp12 * tmp14
tmp16 = -tmp0
tmp17 = tmp13 * tmp13
tmp18 = 0.25
tmp19 = tmp17 * tmp18
tmp20 = tmp16 * tmp19
tmp21 = tl_math.log(tmp8)
tmp22 = tmp20 * tmp21
tmp23 = tmp15 + tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_0[grid(256)](arg1_1,
arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BinaryFocalLossNew(torch.nn.Module):
""" from https://github.com/qubvel/segmentation_models"""
def __init__(self, gamma=2.0, alpha=0.25, eps=1e-07):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | kungfuai/d3m-segmentation-research | BinaryFocalLoss | false | 7059 | [
"MIT"
] | 1 | 5bc44ddd0e8522fb2b369866ad47aa62a24a8f63 | https://github.com/kungfuai/d3m-segmentation-research/tree/5bc44ddd0e8522fb2b369866ad47aa62a24a8f63 | import torch
class Model(torch.nn.Module):
""" from https://github.com/qubvel/segmentation_models"""
def __init__(self, gamma=2.0, alpha=0.25, eps=1e-07):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.eps = eps
def forward(self, pr, gt):
pr = torch.clamp(pr, self.eps, 1 - self.eps)
loss_1 = -gt * (self.alpha * torch.pow(1 - pr, self.gamma)
) * torch.log(pr)
loss_0 = -(1 - gt) * ((1 - self.alpha) * torch.pow(pr, self.gamma)
) * torch.log(1 - pr)
loss = loss_0 + loss_1
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PredictiveEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mr/cmr3tv5ws2snvq6sc6sxr656wx47kzohgg6mk4czehbbxdzlihyt.py
# Topologically Sorted Source Nodes: [softmax, log_softmax], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax_1, sub_1
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_1), kwargs = {})
triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
tl.store(out_ptr1 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nk/cnk3zca2cotrn52zquetkd3446ncgzdqpvoo7a7vey3gzfcyn5ha.py
# Topologically Sorted Source Nodes: [softmax, log_softmax, b], Original ATen: [aten._softmax, aten._log_softmax, aten.mul]
# Source node to ATen node mapping:
# b => mul
# log_softmax => exp_1, log, sub_2, sum_2
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_2), kwargs = {})
triton_poi_fused__log_softmax__softmax_mul_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x3), xmask)
tmp10 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + (x3), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4t/c4t6rzmozlglaljnnzv44dmy65tvz3df5pemtff4eml5gurscaru.py
# Topologically Sorted Source Nodes: [sum_1, b_1], Original ATen: [aten.sum, aten.mul]
# Source node to ATen node mapping:
# b_1 => mul_1
# sum_1 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, -1.0), kwargs = {})
triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = -1.0
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, log_softmax], Original ATen: [aten._softmax, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0.run(arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, log_softmax, b], Original ATen: [aten._softmax, aten._log_softmax, aten.mul]
triton_poi_fused__log_softmax__softmax_mul_1.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, b_1], Original ATen: [aten.sum, aten.mul]
triton_poi_fused_mul_sum_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class PredictiveEntropy(nn.Module):
def __init__(self):
super(PredictiveEntropy, self).__init__()
def forward(self, x):
b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
b = -1.0 * b.sum(dim=1)
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
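if __name__ == '__main__':
    # Hedged sanity check (not from the original repo): the fused
    # expression reduces to the Shannon entropy of softmax(x, dim=1).
    x = torch.rand(2, 5)
    p = F.softmax(x, dim=1)
    ref = -(p * p.log()).sum(dim=1)
    assert torch.allclose(PredictiveEntropy()(x), ref, atol=1e-06)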
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + x3, tmp23, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = -1.0
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf0,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_1[grid(256)](buf0, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf3,
class PredictiveEntropyNew(nn.Module):
def __init__(self):
super(PredictiveEntropyNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
 | kowshikthopalli/MULDENS | PredictiveEntropy | false | 7060 | [
"MIT"
] | 1 | e2d5f8ec51024c5bdda6d1fcde4a96a3f31e6930 | https://github.com/kowshikthopalli/MULDENS/tree/e2d5f8ec51024c5bdda6d1fcde4a96a3f31e6930 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
b = -1.0 * b.sum(dim=1)
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CalibrationModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/at/catwgbiwmbg7xppf43cvbcn5d6non7ron2vvi5h77ptca3ltb2j6.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class CalibrationModel(torch.nn.Module):
""" Adds temperature scaling parameters to trained model"""
def __init__(self):
super().__init__()
self.temperature = torch.nn.Parameter(torch.ones(1) * 1.5)
def forward(self, logits, device=torch.device('cpu')):
return logits / self.temperature
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
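if __name__ == '__main__':
    # Hedged illustration (not from the original repo): dividing logits
    # by a temperature T > 1 flattens the softmax, which is the usual
    # post-hoc confidence-calibration recipe.
    logits = torch.tensor([[2.0, 0.5, -1.0]])
    print(torch.softmax(logits, dim=1))
    print(torch.softmax(CalibrationModel()(logits), dim=1))  # softer, T=1.5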
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class CalibrationModelNew(torch.nn.Module):
""" Adds temperature scaling parameters to trained model"""
def __init__(self):
super().__init__()
self.temperature = torch.nn.Parameter(torch.ones(1) * 1.5)
def forward(self, input_0):
primals_1 = self.temperature
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
 | kungfuai/d3m-segmentation-research | CalibrationModel | false | 7061 | [
"MIT"
] | 1 | 5bc44ddd0e8522fb2b369866ad47aa62a24a8f63 | https://github.com/kungfuai/d3m-segmentation-research/tree/5bc44ddd0e8522fb2b369866ad47aa62a24a8f63 | import torch
class Model(torch.nn.Module):
""" Adds temperature scaling parameters to trained model"""
def __init__(self):
super().__init__()
self.temperature = torch.nn.Parameter(torch.ones(1) * 1.5)
def forward(self, logits, device=torch.device('cpu')):
return logits / self.temperature
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => relu_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/li/clisyfh7uy7myv7uicl6ym42hf2x575nogmdoxa7aohhuh54uign.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out_3 => convolution_1
# out_4 => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %relu), kwargs = {})
# %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %relu), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(buf4, primals_5, buf0, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf4, primals_2, primals_4, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
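# Hedged worked example of the SAME-padding math above (added for
# clarity; values are illustrative): with input_size=5, stride=2,
# kernel=3, dilation=1 -> out_size = ceil(5/2) = 3 and total_padding =
# (3 - 1)*2 + 3 - 5 = 2, so one pixel of padding on each side
# reproduces TF's behaviour; an odd total triggers the extra F.pad on
# one side only.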
class BasicBlock(nn.Module):
"""
Residual Network Block
"""
def __init__(self, n_channels, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.relu = nn.ReLU(inplace=True)
self.conv2 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.stride = stride
apply_init_(self.modules())
self.train()
def forward(self, x):
identity = x
out = self.relu(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
out += identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4}]
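if __name__ == '__main__':
    # Hedged sanity check (not from the original repo): the residual
    # block is shape-preserving, and the TF-style padding keeps spatial
    # dims at ceil(input / stride); values below are illustrative.
    conv = Conv2d_tf(4, 8, kernel_size=3, stride=2)  # padding defaults to 'SAME'
    print(conv(torch.rand(1, 4, 5, 5)).shape)   # torch.Size([1, 8, 3, 3])
    block = Model(n_channels=4)
    print(block(torch.rand(2, 4, 4, 4)).shape)  # torch.Size([2, 4, 4, 4])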
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_2[grid(256)](buf4, primals_5, buf0,
primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf4, primals_2, primals_4, buf0, buf2
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class BasicBlockNew(nn.Module):
"""
Residual Network Block
"""
def __init__(self, n_channels, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.relu = nn.ReLU(inplace=True)
self.conv2 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.stride = stride
apply_init_(self.modules())
self.train()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
 | krg-nandu/prj-taxRL | BasicBlock | false | 7062 | [
"MIT"
] | 1 | be65d004c196aff73714dcb346c814ae97db30e2 | https://github.com/krg-nandu/prj-taxRL/tree/be65d004c196aff73714dcb346c814ae97db30e2 | import torch
import torch.nn as nn
from torch.nn import functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class Model(nn.Module):
"""
Residual Network Block
"""
def __init__(self, n_channels, stride=1):
super().__init__()
self.conv1 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.relu = nn.ReLU(inplace=True)
self.conv2 = Conv2d_tf(n_channels, n_channels, kernel_size=3,
stride=1, padding=(1, 1))
self.stride = stride
apply_init_(self.modules())
self.train()
def forward(self, x):
identity = x
out = self.relu(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
out += identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Linear_2L_KFRA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rr/crrgtc7cjhmconlc4lrghdtlxlrhusyvwnt3b35beyeisdtl2lgu.py
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
# Source node to ATen node mapping:
# fill_ => full_default
# Graph fragment:
# %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_fill_0 = async_compile.triton('triton_poi_fused_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yn/cyneyfxx44v6cylz6voujw7xntwjaqi4l3utnyjbksdtzfqswrd5.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %full_default], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nc/cncd5yvgjpjfrvyaveva55remy3ckx5quvuhdmurdt3pp6k3qtux.py
# Topologically Sorted Source Nodes: [a1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# a1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused_fill_0.run(buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_1, buf1, 20, grid=grid(20), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
del primals_2
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [a1], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf3, buf4, 20, grid=grid(20), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf5)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [a2], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf6, buf7, 20, grid=grid(20), stream=stream0)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_7
return (buf8, buf7, buf6, buf4, buf3, buf1, buf0, primals_1, buf3, buf6, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
def sample_K_laplace_MN(MAP, upper_Qinv, lower_HHinv):
Z = MAP.data.new(MAP.size()).normal_(mean=0, std=1)
all_mtx_sample = MAP + torch.matmul(torch.matmul(lower_HHinv, Z),
upper_Qinv)
weight_mtx_sample = all_mtx_sample[:, :-1]
bias_mtx_sample = all_mtx_sample[:, -1]
return weight_mtx_sample, bias_mtx_sample
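# Shape sketch (illustrative, not from the source repo): for a layer with
# out_features=o and in_features=i, MAP is (o, i + 1), lower_HHinv is (o, o)
# and upper_Qinv is (i + 1, i + 1), so the matmul chain preserves MAP's shape:
#   MAP = torch.zeros(4, 5)
#   w, b = sample_K_laplace_MN(MAP, torch.eye(5), torch.eye(4))
#   w.shape, b.shape  # -> (torch.Size([4, 4]), torch.Size([4]))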
class Linear_2L_KFRA(nn.Module):
def __init__(self, input_dim, output_dim, n_hid):
super(Linear_2L_KFRA, self).__init__()
self.n_hid = n_hid
self.input_dim = input_dim
self.output_dim = output_dim
self.fc1 = nn.Linear(input_dim, self.n_hid)
self.fc2 = nn.Linear(self.n_hid, self.n_hid)
self.fc3 = nn.Linear(self.n_hid, output_dim)
self.act = nn.ReLU(inplace=True)
self.one = None
self.a2 = None
self.h2 = None
self.a1 = None
self.h1 = None
self.a0 = None
def forward(self, x):
self.one = x.new(x.shape[0], 1).fill_(1)
a0 = x.view(-1, self.input_dim)
self.a0 = torch.cat((a0.data, self.one), dim=1)
h1 = self.fc1(a0)
self.h1 = h1.data
a1 = self.act(h1)
self.a1 = torch.cat((a1.data, self.one), dim=1)
h2 = self.fc2(a1)
self.h2 = h2.data
a2 = self.act(h2)
self.a2 = torch.cat((a2.data, self.one), dim=1)
h3 = self.fc3(a2)
return h3
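    # Note (illustrative): each stored a* tensor appends the ones column so
    # that Kronecker-factored curvature estimates built from it (e.g.
    # a1.t() @ a1 / batch_size) account for the bias alongside the weights.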
def sample_predict(self, x, Nsamples, Qinv1, HHinv1, MAP1, Qinv2,
HHinv2, MAP2, Qinv3, HHinv3, MAP3):
predictions = x.data.new(Nsamples, x.shape[0], self.output_dim)
x = x.view(-1, self.input_dim)
for i in range(Nsamples):
w1, b1 = sample_K_laplace_MN(MAP1, Qinv1, HHinv1)
a = torch.matmul(x, torch.t(w1)) + b1.unsqueeze(0)
a = self.act(a)
w2, b2 = sample_K_laplace_MN(MAP2, Qinv2, HHinv2)
a = torch.matmul(a, torch.t(w2)) + b2.unsqueeze(0)
a = self.act(a)
w3, b3 = sample_K_laplace_MN(MAP3, Qinv3, HHinv3)
y = torch.matmul(a, torch.t(w3)) + b3.unsqueeze(0)
predictions[i] = y
return predictions
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'n_hid': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
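# Fills the (4, 1) ones buffer that forward() concatenates onto each activation.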
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
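# Concatenates a (4, 4) activation with the ones column into a (4, 5) buffer;
# the bare tl.full(...) expressions below compute values that are never used.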
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
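# Fused bias-add + ReLU applied in place after each hidden-layer matmul.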
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_fill_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
triton_poi_fused_cat_1[grid(20)](primals_1, buf1, 20, XBLOCK=32,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf2)
del primals_2
buf3 = buf2
del buf2
triton_poi_fused_relu_2[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
triton_poi_fused_cat_1[grid(20)](buf3, buf4, 20, XBLOCK=32,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf5)
buf6 = buf5
del buf5
triton_poi_fused_relu_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
triton_poi_fused_cat_1[grid(20)](buf6, buf7, 20, XBLOCK=32,
num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_7
return (buf8, buf7, buf6, buf4, buf3, buf1, buf0, primals_1, buf3, buf6,
primals_6, primals_4)
def sample_K_laplace_MN(MAP, upper_Qinv, lower_HHinv):
Z = MAP.data.new(MAP.size()).normal_(mean=0, std=1)
all_mtx_sample = MAP + torch.matmul(torch.matmul(lower_HHinv, Z),
upper_Qinv)
weight_mtx_sample = all_mtx_sample[:, :-1]
bias_mtx_sample = all_mtx_sample[:, -1]
return weight_mtx_sample, bias_mtx_sample
class Linear_2L_KFRANew(nn.Module):
def __init__(self, input_dim, output_dim, n_hid):
super(Linear_2L_KFRANew, self).__init__()
self.n_hid = n_hid
self.input_dim = input_dim
self.output_dim = output_dim
self.fc1 = nn.Linear(input_dim, self.n_hid)
self.fc2 = nn.Linear(self.n_hid, self.n_hid)
self.fc3 = nn.Linear(self.n_hid, output_dim)
self.act = nn.ReLU(inplace=True)
self.one = None
self.a2 = None
self.h2 = None
self.a1 = None
self.h1 = None
self.a0 = None
def sample_predict(self, x, Nsamples, Qinv1, HHinv1, MAP1, Qinv2,
HHinv2, MAP2, Qinv3, HHinv3, MAP3):
predictions = x.data.new(Nsamples, x.shape[0], self.output_dim)
x = x.view(-1, self.input_dim)
for i in range(Nsamples):
w1, b1 = sample_K_laplace_MN(MAP1, Qinv1, HHinv1)
a = torch.matmul(x, torch.t(w1)) + b1.unsqueeze(0)
a = self.act(a)
w2, b2 = sample_K_laplace_MN(MAP2, Qinv2, HHinv2)
a = torch.matmul(a, torch.t(w2)) + b2.unsqueeze(0)
a = self.act(a)
w3, b3 = sample_K_laplace_MN(MAP3, Qinv3, HHinv3)
y = torch.matmul(a, torch.t(w3)) + b3.unsqueeze(0)
predictions[i] = y
return predictions
    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kw-lee/Bayesian-Neural-Networks | Linear_2L_KFRA | false | 7,063 | [
"MIT"
] | 1 | 3327fcf85e47c15d86c872211427bff133880c34 | https://github.com/kw-lee/Bayesian-Neural-Networks/tree/3327fcf85e47c15d86c872211427bff133880c34 | import torch
import torch.nn as nn
import torch.utils.data
def sample_K_laplace_MN(MAP, upper_Qinv, lower_HHinv):
Z = MAP.data.new(MAP.size()).normal_(mean=0, std=1)
all_mtx_sample = MAP + torch.matmul(torch.matmul(lower_HHinv, Z),
upper_Qinv)
weight_mtx_sample = all_mtx_sample[:, :-1]
bias_mtx_sample = all_mtx_sample[:, -1]
return weight_mtx_sample, bias_mtx_sample
class Model(nn.Module):
def __init__(self, input_dim, output_dim, n_hid):
super().__init__()
self.n_hid = n_hid
self.input_dim = input_dim
self.output_dim = output_dim
self.fc1 = nn.Linear(input_dim, self.n_hid)
self.fc2 = nn.Linear(self.n_hid, self.n_hid)
self.fc3 = nn.Linear(self.n_hid, output_dim)
self.act = nn.ReLU(inplace=True)
self.one = None
self.a2 = None
self.h2 = None
self.a1 = None
self.h1 = None
self.a0 = None
def forward(self, x):
self.one = x.new(x.shape[0], 1).fill_(1)
a0 = x.view(-1, self.input_dim)
self.a0 = torch.cat((a0.data, self.one), dim=1)
h1 = self.fc1(a0)
self.h1 = h1.data
a1 = self.act(h1)
self.a1 = torch.cat((a1.data, self.one), dim=1)
h2 = self.fc2(a1)
self.h2 = h2.data
a2 = self.act(h2)
self.a2 = torch.cat((a2.data, self.one), dim=1)
h3 = self.fc3(a2)
return h3
def sample_predict(self, x, Nsamples, Qinv1, HHinv1, MAP1, Qinv2,
HHinv2, MAP2, Qinv3, HHinv3, MAP3):
predictions = x.data.new(Nsamples, x.shape[0], self.output_dim)
x = x.view(-1, self.input_dim)
for i in range(Nsamples):
w1, b1 = sample_K_laplace_MN(MAP1, Qinv1, HHinv1)
a = torch.matmul(x, torch.t(w1)) + b1.unsqueeze(0)
a = self.act(a)
w2, b2 = sample_K_laplace_MN(MAP2, Qinv2, HHinv2)
a = torch.matmul(a, torch.t(w2)) + b2.unsqueeze(0)
a = self.act(a)
w3, b3 = sample_K_laplace_MN(MAP3, Qinv3, HHinv3)
y = torch.matmul(a, torch.t(w3)) + b3.unsqueeze(0)
predictions[i] = y
return predictions
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
GaussianSubnetBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cs/ccsak2aq2focic3gvi5yypd2u37w22ixutbqzqc6vdjhrk4zppac.py
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 144, grid=grid(144), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class GaussianSubnetBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel, tanh=False):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=1 if
kernel > 1 else 0)
self.activation = nn.Tanh() if tanh else nn.ReLU()
if tanh:
nn.init.xavier_normal_(self.conv.weight, gain=nn.init.
calculate_gain('tanh'))
else:
nn.init.kaiming_normal_(self.conv.weight, nonlinearity='relu')
nn.init.constant_(self.conv.bias, 0)
def forward(self, x):
x = self.conv(x)
return self.activation(x)
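# Illustrative usage for the default init below: Conv2d(4, 4, kernel_size=4,
# padding=1) maps (4, 4, 4, 4) to (4, 4, 3, 3), matching the size asserted on
# buf0 in the compiled call:
#   GaussianSubnetBlock(4, 4, 4)(torch.rand(4, 4, 4, 4)).shape  # [4, 4, 3, 3]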
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
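# Fused bias-add + ReLU that also emits the (output <= 0) mask consumed by
# autograd's threshold_backward in the backward pass.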
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(144)](buf1,
primals_2, buf2, 144, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class GaussianSubnetBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel, tanh=False):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=1 if
kernel > 1 else 0)
self.activation = nn.Tanh() if tanh else nn.ReLU()
if tanh:
nn.init.xavier_normal_(self.conv.weight, gain=nn.init.
calculate_gain('tanh'))
else:
nn.init.kaiming_normal_(self.conv.weight, nonlinearity='relu')
nn.init.constant_(self.conv.bias, 0)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| laitalaj/cvpce | GaussianSubnetBlock | false | 7,064 | [
"MIT"
] | 1 | 7509e7d7783039f39a88edc6e411333bcf6fb2af | https://github.com/laitalaj/cvpce/tree/7509e7d7783039f39a88edc6e411333bcf6fb2af | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel, tanh=False):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=1 if
kernel > 1 else 0)
self.activation = nn.Tanh() if tanh else nn.ReLU()
if tanh:
nn.init.xavier_normal_(self.conv.weight, gain=nn.init.
calculate_gain('tanh'))
else:
nn.init.kaiming_normal_(self.conv.weight, nonlinearity='relu')
nn.init.constant_(self.conv.bias, 0)
def forward(self, x):
x = self.conv(x)
return self.activation(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Detector | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cq/ccqmv4vauv44j2hgnxn5sporqcy2yzbsian3zehzz6wa7nmateak.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 819200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 50
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dx/cdxfdpvkv63m5vdooyxvbdyeeowuwml22ndv4ubxslsfdztt23ho.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 204800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/an/canisl5jffgnaiz27t5zdddv72uhwcfbr7z7qbnax2fsu26aohm5.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ej/cejadgdnsagmgryonneqam4sfe3q4rk7llixbmxz3v7jpcioaosf.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_5 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (2*x1), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), None, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_2, (50, ), (1, ))
assert_size_stride(primals_3, (4, 50, 64, 64), (204800, 4096, 64, 1))
assert_size_stride(primals_4, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (50, 200), (200, 1))
assert_size_stride(primals_7, (50, ), (1, ))
assert_size_stride(primals_8, (2, 50), (50, 1))
assert_size_stride(primals_9, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 819200, grid=grid(819200), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 819200, grid=grid(819200), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 50, 32, 32), (51200, 1024, 32, 1), torch.int8)
buf5 = empty_strided_cuda((4, 50, 32, 32), (51200, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 204800, grid=grid(204800), stream=stream0)
buf6 = empty_strided_cuda((1024, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (1024, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 50), (1, 200), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf7, primals_7, 51200, grid=grid(51200), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((1024, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf7, reinterpret_tensor(primals_8, (50, 2), (1, 50), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((1024, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf8, buf9, 2048, grid=grid(2048), stream=stream0)
del buf8
return (buf9, primals_1, primals_3, primals_4, buf1, buf3, buf4, reinterpret_tensor(buf5, (1024, 200), (200, 1), 0), buf7, buf9, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((50, 50, 3, 3), (450, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 50, 64, 64), (204800, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 50, 3, 3), (450, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((50, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Detector(nn.Module):
def __init__(self):
super(Detector, self).__init__()
self.conv1 = nn.Conv2d(50, 50, 3, 1, 1)
self.conv2 = nn.Conv2d(50, 50, 3, 1, 1)
self.fc1 = nn.Linear(2 * 2 * 50, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 2 * 2 * 50)
x = F.relu(self.fc1(x))
x = F.softmax(self.fc2(x), dim=1)
return x
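# Shape walk-through for the default (4, 50, 64, 64) input (illustrative):
# two padded 3x3 convs keep (4, 50, 64, 64) and the 2x2 max pool gives
# (4, 50, 32, 32). The head was sized for 2*2*50 = 200 features (a 4x4
# input), so here view(-1, 200) produces 1024 rows that slice the spatial
# map rather than one feature vector per sample.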
def get_inputs():
return [torch.rand([4, 50, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
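# Fused conv-bias add + ReLU applied in place to each convolution output.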
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
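# 2x2 / stride-2 max pool emitting int8 argmax offsets (out_ptr0) and pooled
# values (out_ptr1), reducing 64x64 maps to 32x32.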
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
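# Numerically stable softmax over the last dimension (size 2): subtract the
# row max, exponentiate, normalize by the row sum.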
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + 2 * x1, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), None, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 50, 64, 64), (204800, 4096, 64, 1))
assert_size_stride(primals_4, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (50, 200), (200, 1))
assert_size_stride(primals_7, (50,), (1,))
assert_size_stride(primals_8, (2, 50), (50, 1))
assert_size_stride(primals_9, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(819200)](buf1, primals_2,
819200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(819200)](buf3, primals_5,
819200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 50, 32, 32), (51200, 1024, 32, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 50, 32, 32), (51200, 1024, 32, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(204800)](buf3, buf4,
buf5, 204800, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = empty_strided_cuda((1024, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (1024, 200), (200, 1), 0
), reinterpret_tensor(primals_6, (200, 50), (1, 200), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_2[grid(51200)](buf7, primals_7, 51200, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((1024, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_9, buf7, reinterpret_tensor(primals_8,
(50, 2), (1, 50), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((1024, 2), (2, 1), torch.float32)
triton_poi_fused__softmax_3[grid(2048)](buf8, buf9, 2048, XBLOCK=
256, num_warps=4, num_stages=1)
del buf8
return (buf9, primals_1, primals_3, primals_4, buf1, buf3, buf4,
reinterpret_tensor(buf5, (1024, 200), (200, 1), 0), buf7, buf9,
primals_8, primals_6)
class DetectorNew(nn.Module):
def __init__(self):
super(DetectorNew, self).__init__()
self.conv1 = nn.Conv2d(50, 50, 3, 1, 1)
self.conv2 = nn.Conv2d(50, 50, 3, 1, 1)
self.fc1 = nn.Linear(2 * 2 * 50, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| ksivaman/observer-networks | Detector | false | 7,065 | [
"MIT"
] | 1 | a0cd540c762751c5500f714dc3979d3a62b9ea77 | https://github.com/ksivaman/observer-networks/tree/a0cd540c762751c5500f714dc3979d3a62b9ea77 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(50, 50, 3, 1, 1)
self.conv2 = nn.Conv2d(50, 50, 3, 1, 1)
self.fc1 = nn.Linear(2 * 2 * 50, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 2 * 2 * 50)
x = F.relu(self.fc1(x))
x = F.softmax(self.fc2(x), dim=1)
return x
def get_inputs():
return [torch.rand([4, 50, 64, 64])]
def get_init_inputs():
return []
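# Shape bookkeeping for the sample input above (a worked check, not extra model
# code): conv1 and conv2 preserve 64x64 (3x3 kernels, stride 1, padding 1) and the
# 2x2 max-pool halves it, so x is (4, 50, 32, 32) = 204800 elements before the
# flatten. x.view(-1, 2 * 2 * 50) therefore yields 204800 / 200 = 1024 rows of
# width 200, which is why the compiled call() earlier in this record allocates
# fc1/fc2 activations of shape (1024, 50) and (1024, 2): for 64x64 inputs the view
# silently folds spatial positions into the batch dimension.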
|
BahdanauAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fo/cfokv5mhjtrq5wugzlwfb7neo7lma2qe4v72mqif34incoecopkz.py
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm => pow_1, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
triton_per_fused_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')
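# The persistent reduction above only accumulates sum(v * v) over the 4-element
# weight vector; the square root is deferred to the consumer kernel further down
# (libdevice.sqrt applied to the summed squares), so together they realize
# torch.norm(self.v) as sqrt(sum(v ** 2)).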
# kernel path: runs/run_shard_4/inductor_cache/n4/cn4tc22cfrubj7ikntlgvo2zigcqyb4exwc5hxvpzcneh4ztsn7l.py
# Topologically Sorted Source Nodes: [add, add_1, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# tanh => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, %view_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_7), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
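# The kernel above fuses the elementwise chain tanh(projected_query + key + b):
# in_out_ptr0 holds the query projection (a reused mm output buffer), in_ptr0 the
# key projection, and in_ptr1 the bias b broadcast along the embed dim via
# x0 = xindex % 4; the tanh result is written back in place, so no extra
# activation buffer is allocated.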
# kernel path: runs/run_shard_4/inductor_cache/by/cbyk55yox2xs65bp7r2r6lyxsibdiqng2w7afs2wvc4dl67gwtfi.py
# Topologically Sorted Source Nodes: [mul, norm, normed_v, mul_1, attn_scores, attn_scores_1], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.div, aten.sum, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => sum_2
# attn_scores_1 => amax, div_1, exp, sub, sum_3
# mul => mul
# mul_1 => mul_1
# norm => pow_2
# normed_v => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %primals_6), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %pow_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %tanh), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sum_2, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {})
triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2 = async_compile.triton('triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex % 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp8 = tl.load(in_ptr3 + (x4 + (64*x2)), xmask)
tmp10 = tl.load(in_ptr3 + (16 + x4 + (64*x2)), xmask)
tmp13 = tl.load(in_ptr3 + (32 + x4 + (64*x2)), xmask)
tmp16 = tl.load(in_ptr3 + (48 + x4 + (64*x2)), xmask)
tmp3 = tmp1 * tmp2
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp3 / tmp6
tmp9 = tmp7 * tmp8
tmp11 = tmp7 * tmp10
tmp12 = tmp9 + tmp11
tmp14 = tmp7 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp7 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp18 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 / tmp20
tl.store(in_out_ptr0 + (x3), tmp21, xmask)
''', device_str='cuda')
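# Note how Inductor constant-folded the softmax in this kernel: for these inputs
# attn_scores has a leading dimension of size 1 (see the shape note after the
# source module below), so amax and the exp-sum each reduce over a single element
# and the arithmetic degenerates to tmp19 = tmp18 - tmp18 and
# tmp21 = tmp20 / tmp20, i.e. every attention weight is exactly 1.0.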
# kernel path: runs/run_shard_4/inductor_cache/nu/cnub5y6hoo5eqx4ansihvpe6lypvv3sdp7muydbgzcmc6izyixnc.py
# Topologically Sorted Source Nodes: [mul_2, context], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# context => sum_4
# mul_2 => mul_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %primals_4), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [0]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [key], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0.run(primals_6, buf2, 1, 4, grid=grid(1), stream=stream0)
buf3 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, add_1, tanh], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_1.run(buf3, buf1, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf4 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul, norm, normed_v, mul_1, attn_scores, attn_scores_1], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.div, aten.sum, aten._softmax]
triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2.run(buf5, primals_5, primals_6, buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf2
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [mul_2, context], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf5, primals_4, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, buf5, primals_4, primals_5, primals_6, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
from typing import *
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class BaseAttention(nn.Module):
"""Base class for attention layers."""
def __init__(self, query_dim, value_dim, embed_dim=None):
super().__init__()
self.query_dim = query_dim
self.value_dim = value_dim
self.embed_dim = embed_dim
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
pass
def forward(self, query, value, key_padding_mask=None, state=None):
raise NotImplementedError
class BahdanauAttention(BaseAttention):
""" Bahdanau Attention."""
def __init__(self, query_dim, value_dim, embed_dim, normalize=True):
super().__init__(query_dim, value_dim, embed_dim)
self.query_proj = nn.Linear(self.query_dim, embed_dim, bias=False)
self.value_proj = nn.Linear(self.value_dim, embed_dim, bias=False)
self.v = Parameter(torch.Tensor(embed_dim))
self.normalize = normalize
if self.normalize:
self.b = Parameter(torch.Tensor(embed_dim))
self.g = Parameter(torch.Tensor(1))
self.reset_parameters()
def reset_parameters(self):
self.query_proj.weight.data.uniform_(-0.1, 0.1)
self.value_proj.weight.data.uniform_(-0.1, 0.1)
nn.init.uniform_(self.v, -0.1, 0.1)
if self.normalize:
nn.init.constant_(self.b, 0.0)
nn.init.constant_(self.g, math.sqrt(1.0 / self.embed_dim))
def forward(self, query, value, key_padding_mask=None, state=None):
projected_query = self.query_proj(query).unsqueeze(0)
key = self.value_proj(value)
if self.normalize:
normed_v = self.g * self.v / torch.norm(self.v)
attn_scores = (normed_v * torch.tanh(projected_query + key +
self.b)).sum(dim=2)
else:
            attn_scores = (self.v * torch.tanh(projected_query + key)).sum(dim=2)
if key_padding_mask is not None:
attn_scores = attn_scores.float().masked_fill_(key_padding_mask,
float('-inf')).type_as(attn_scores)
attn_scores = F.softmax(attn_scores, dim=0)
context = (attn_scores.unsqueeze(2) * value).sum(dim=0)
next_state = attn_scores
return context, attn_scores, next_state
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'query_dim': 4, 'value_dim': 4, 'embed_dim': 4}]
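# With the toy shapes above, the softmax over dim=0 is degenerate: query is
# (4, 4, 4, 4), so projected_query.unsqueeze(0) gets a leading dim of size 1 and
# attn_scores comes out as (1, 4, 4, 4). Softmax over a size-1 dim is identically
# 1.0, which is exactly what the fused Triton kernel in this record hard-codes.
# A small sanity check (assumed usage, not part of the module):
#
#     attn = BahdanauAttention(4, 4, 4)
#     q, v = get_inputs()
#     context, scores, _ = attn(q, v)
#     assert scores.shape == (1, 4, 4, 4) and bool((scores == 1.0).all())
#     assert torch.equal(context, v)  # weights of 1 over a length-1 axis copy value
#
# In real use dim 0 is the source length, so the softmax normalizes over source
# positions instead of collapsing to ones.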
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.utils.data
from typing import *
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex % 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp8 = tl.load(in_ptr3 + (x4 + 64 * x2), xmask)
tmp10 = tl.load(in_ptr3 + (16 + x4 + 64 * x2), xmask)
tmp13 = tl.load(in_ptr3 + (32 + x4 + 64 * x2), xmask)
tmp16 = tl.load(in_ptr3 + (48 + x4 + 64 * x2), xmask)
tmp3 = tmp1 * tmp2
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp3 / tmp6
tmp9 = tmp7 * tmp8
tmp11 = tmp7 * tmp10
tmp12 = tmp9 + tmp11
tmp14 = tmp7 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp7 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp18 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 / tmp20
tl.store(in_out_ptr0 + x3, tmp21, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(1)](primals_6, buf2, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf0
triton_poi_fused_add_tanh_1[grid(256)](buf3, buf1, primals_7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf4 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4
del buf4
triton_poi_fused__softmax_div_linalg_vector_norm_mul_sum_2[grid(64)](
buf5, primals_5, primals_6, buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_sum_3[grid(256)](buf5, primals_4, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf6, buf5, primals_4, primals_5, primals_6, reinterpret_tensor(
primals_2, (64, 4), (4, 1), 0), buf3, buf5
class BaseAttention(nn.Module):
"""Base class for attention layers."""
def __init__(self, query_dim, value_dim, embed_dim=None):
super().__init__()
self.query_dim = query_dim
self.value_dim = value_dim
self.embed_dim = embed_dim
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
pass
def forward(self, query, value, key_padding_mask=None, state=None):
raise NotImplementedError
class BahdanauAttentionNew(BaseAttention):
""" Bahdanau Attention."""
def __init__(self, query_dim, value_dim, embed_dim, normalize=True):
super().__init__(query_dim, value_dim, embed_dim)
self.query_proj = nn.Linear(self.query_dim, embed_dim, bias=False)
self.value_proj = nn.Linear(self.value_dim, embed_dim, bias=False)
self.v = Parameter(torch.Tensor(embed_dim))
self.normalize = normalize
if self.normalize:
self.b = Parameter(torch.Tensor(embed_dim))
self.g = Parameter(torch.Tensor(1))
self.reset_parameters()
def reset_parameters(self):
self.query_proj.weight.data.uniform_(-0.1, 0.1)
self.value_proj.weight.data.uniform_(-0.1, 0.1)
nn.init.uniform_(self.v, -0.1, 0.1)
if self.normalize:
nn.init.constant_(self.b, 0.0)
nn.init.constant_(self.g, math.sqrt(1.0 / self.embed_dim))
def forward(self, input_0, input_1):
primals_6 = self.v
primals_7 = self.b
primals_5 = self.g
primals_1 = self.query_proj.weight
primals_3 = self.value_proj.weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
| lahiruts/espresso | BahdanauAttention | false | 7,066 | [
"MIT"
] | 1 | 940a1bf3c2c3d4a057d543b875c329b0515e6b55 | https://github.com/lahiruts/espresso/tree/940a1bf3c2c3d4a057d543b875c329b0515e6b55 | import math
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
from typing import *
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class BaseAttention(nn.Module):
"""Base class for attention layers."""
def __init__(self, query_dim, value_dim, embed_dim=None):
super().__init__()
self.query_dim = query_dim
self.value_dim = value_dim
self.embed_dim = embed_dim
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
pass
def forward(self, query, value, key_padding_mask=None, state=None):
raise NotImplementedError
class Model(BaseAttention):
""" Bahdanau Attention."""
def __init__(self, query_dim, value_dim, embed_dim, normalize=True):
super().__init__(query_dim, value_dim, embed_dim)
self.query_proj = nn.Linear(self.query_dim, embed_dim, bias=False)
self.value_proj = nn.Linear(self.value_dim, embed_dim, bias=False)
self.v = Parameter(torch.Tensor(embed_dim))
self.normalize = normalize
if self.normalize:
self.b = Parameter(torch.Tensor(embed_dim))
self.g = Parameter(torch.Tensor(1))
self.reset_parameters()
def reset_parameters(self):
self.query_proj.weight.data.uniform_(-0.1, 0.1)
self.value_proj.weight.data.uniform_(-0.1, 0.1)
nn.init.uniform_(self.v, -0.1, 0.1)
if self.normalize:
nn.init.constant_(self.b, 0.0)
nn.init.constant_(self.g, math.sqrt(1.0 / self.embed_dim))
def forward(self, query, value, key_padding_mask=None, state=None):
projected_query = self.query_proj(query).unsqueeze(0)
key = self.value_proj(value)
if self.normalize:
normed_v = self.g * self.v / torch.norm(self.v)
attn_scores = (normed_v * torch.tanh(projected_query + key +
self.b)).sum(dim=2)
else:
            attn_scores = (self.v * torch.tanh(projected_query + key)).sum(dim=2)
if key_padding_mask is not None:
attn_scores = attn_scores.float().masked_fill_(key_padding_mask,
float('-inf')).type_as(attn_scores)
attn_scores = F.softmax(attn_scores, dim=0)
context = (attn_scores.unsqueeze(2) * value).sum(dim=0)
next_state = attn_scores
return context, attn_scores, next_state
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
EncoderNO2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
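# Besides applying the ReLU in place, the kernel above also writes the boolean mask
# tmp6 = (activation <= 0) to out_ptr0; aten.threshold_backward consumes this mask
# in the backward pass to zero gradients where the ReLU was inactive, which is why
# call() keeps buf6 in its return tuple.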
# kernel path: runs/run_shard_4/inductor_cache/4t/c4tgqjmoizntbw42bzrbyamppc37vlvapvucgz7q5ijunqnhza4a.py
# Topologically Sorted Source Nodes: [truediv, std], Original ATen: [aten.div, aten.exp]
# Source node to ATen node mapping:
# std => exp
# truediv => div
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_tensor, 2), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
triton_poi_fused_div_exp_1 = async_compile.triton('triton_poi_fused_div_exp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_exp_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
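# Inductor strength-reduced the source-level log_var / 2 into a multiply by 0.5
# (tmp3 = 0.5, tmp4 = tmp2 * tmp3) before the exp; std = exp(0.5 * log_var) is the
# same value in the cheaper form.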
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (8, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (8, 1), 4), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [truediv, std], Original ATen: [aten.div, aten.exp]
triton_poi_fused_div_exp_1.run(buf5, primals_9, 256, grid=grid(256), stream=stream0)
del primals_9
return (buf3, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (8, 1), 0), reinterpret_tensor(buf2, (64, 4), (8, 1), 4), buf5, primals_8, primals_6, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class EncoderNO2(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H)
self.enc2 = nn.Linear(in_features=self.H, out_features=self.M * 2)
self.loc_param = nn.Linear(in_features=self.M, out_features=self.M)
self.scale_param = nn.Linear(in_features=self.M, out_features=self.M)
def forward(self, x):
x = self.enc1(x)
x = nn.functional.relu(x)
x = self.enc2(x)
x = x.view(-1, 2, self.M)
mu = self.loc_param(x[:, 0, :])
log_var = self.scale_param(x[:, 1, :])
std = torch.exp(log_var / 2)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D': 4, 'H': 4, 'M': 4}]
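# The x.view(-1, 2, self.M) split above never materializes a copy in the compiled
# code: enc2's (64, 8) output is read through two strided views, stride (8, 1) with
# storage offsets 0 and 4, for the mu and log_var inputs respectively (see the
# reinterpret_tensor calls in the generated call()). A minimal eager sketch of that
# aliasing; the helper name is hypothetical and h must be contiguous of shape
# (N, 2 * M):
def _split_mu_logvar_views(h):
    n, two_m = h.shape
    m = two_m // 2
    # Same memory as h.view(n, 2, m)[:, 0, :] and h.view(n, 2, m)[:, 1, :].
    mu_in = h.as_strided(size=(n, m), stride=(two_m, 1), storage_offset=0)
    log_var_in = h.as_strided(size=(n, m), stride=(two_m, 1), storage_offset=m)
    return mu_in, log_var_in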
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_div_exp_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tl.store(in_out_ptr0 + x2, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (
8, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (8, 1), 4),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_div_exp_1[grid(256)](buf5, primals_9, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
return buf3, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf2, (64, 4), (8, 1), 0), reinterpret_tensor(buf2, (64, 4), (8, 1), 4
), buf5, primals_8, primals_6, primals_4, buf6
class EncoderNO2New(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H)
self.enc2 = nn.Linear(in_features=self.H, out_features=self.M * 2)
self.loc_param = nn.Linear(in_features=self.M, out_features=self.M)
self.scale_param = nn.Linear(in_features=self.M, out_features=self.M)
def forward(self, input_0):
primals_1 = self.enc1.weight
primals_2 = self.enc1.bias
primals_4 = self.enc2.weight
primals_5 = self.enc2.bias
primals_6 = self.loc_param.weight
primals_7 = self.loc_param.bias
primals_8 = self.scale_param.weight
primals_9 = self.scale_param.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| le0x99/deep-generative-modeling | EncoderNO2 | false | 7,067 | [
"MIT"
] | 1 | 40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | https://github.com/le0x99/deep-generative-modeling/tree/40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H)
self.enc2 = nn.Linear(in_features=self.H, out_features=self.M * 2)
self.loc_param = nn.Linear(in_features=self.M, out_features=self.M)
self.scale_param = nn.Linear(in_features=self.M, out_features=self.M)
def forward(self, x):
x = self.enc1(x)
x = nn.functional.relu(x)
x = self.enc2(x)
x = x.view(-1, 2, self.M)
mu = self.loc_param(x[:, 0, :])
log_var = self.scale_param(x[:, 1, :])
std = torch.exp(log_var / 2)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Decoder2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pd/cpdu37l3bj63bjibgjk2ueagf7o3e26iukuvw6axiaa2bjb2e6op.py
# Topologically Sorted Source Nodes: [Z_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Z_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [Z_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Z_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [mu_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# mu_1 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dz/cdzh2oi7rw6yhfcc6xr56aipmptbeerrkvpvz6bngz7przhadvfl.py
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
# Source node to ATen node mapping:
# std => exp
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_8,), kwargs = {})
triton_poi_fused_exp_3 = async_compile.triton('triton_poi_fused_exp_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp2, None)
''', device_str='cuda')
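# The exp kernel above is launched as a single program over one element: it
# reads the scalar log_scale parameter and writes exp(log_scale), i.e. the
# eager one-liner `std = torch.exp(self.log_scale)`.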
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [Z_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 512, grid=grid(512), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [Z_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [mu_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((1, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
triton_poi_fused_exp_3.run(primals_8, buf6, 1, grid=grid(1), stream=stream0)
del primals_8
return (buf5, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, buf6, primals_6, buf7, primals_4, buf8, )
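# Of the tuple returned by call(), the first two entries are the module
# outputs (mu, std); the remaining entries appear to be inputs, intermediate
# activations, weights, and the ReLU masks (buf7, buf8) saved for the
# autograd backward pass.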
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder2(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H * 2)
self.dec2 = nn.Linear(in_features=self.H * 2, out_features=self.H)
self.dec3 = nn.Linear(in_features=self.H, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, Z):
Z = self.dec1(Z)
Z = nn.functional.relu(Z)
Z = self.dec2(Z)
Z = nn.functional.relu(Z)
mu = self.dec3(Z)
mu = nn.functional.tanh(mu)
std = torch.exp(self.log_scale)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4, 'H': 4, 'D': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_exp_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Single-program scalar kernel: only element 0 is read and written, so no
    # offset or mask arithmetic is needed.
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1,
primals_2, buf8, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((1,), (1,), torch.float32)
triton_poi_fused_exp_3[grid(1)](primals_8, buf6, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del primals_8
return buf5, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, buf6, primals_6, buf7, primals_4, buf8
class Decoder2New(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H * 2)
self.dec2 = nn.Linear(in_features=self.H * 2, out_features=self.H)
self.dec3 = nn.Linear(in_features=self.H, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, input_0):
primals_8 = self.log_scale
primals_1 = self.dec1.weight
primals_2 = self.dec1.bias
primals_4 = self.dec2.weight
primals_5 = self.dec2.bias
primals_6 = self.dec3.weight
primals_7 = self.dec3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| le0x99/deep-generative-modeling | Decoder2 | false | 7,068 | ["MIT"] | 1 | 40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | https://github.com/le0x99/deep-generative-modeling/tree/40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H * 2)
self.dec2 = nn.Linear(in_features=self.H * 2, out_features=self.H)
self.dec3 = nn.Linear(in_features=self.H, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, Z):
Z = self.dec1(Z)
Z = nn.functional.relu(Z)
Z = self.dec2(Z)
Z = nn.functional.relu(Z)
mu = self.dec3(Z)
mu = nn.functional.tanh(mu)
std = torch.exp(self.log_scale)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
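# A minimal usage sketch for the module above (assumed shapes, not part of
# the original snippet):
#
#     model = Model(M=4, H=4, D=4)
#     mu, std = model(torch.rand(4, 4, 4, 4))   # mu: (4, 4, 4, 4), std: shape (1,)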
|
InnerProductDecoder | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mb/cmb72vxh36b4k6lvmt4562lj3nrqtpyzst2qbon2yqx22gdjfa7x.py
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# adj => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mm,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.native_dropout]
buf0 = torch.ops.aten.native_dropout.default(arg0_1, 0.1, True)
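        # aten.native_dropout returns a (output, mask) pair; here p=0.1 and
        # train=True, and only the dropped-out output (buf0[0]) is used below.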
del arg0_1
buf1 = buf0[0]
del buf0
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
del buf1
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf4, 16, grid=grid(16), stream=stream0)
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class InnerProductDecoder(nn.Module):
def __init__(self, activation=torch.sigmoid, dropout=0.1):
super().__init__()
self.dropout = dropout
self.activation = activation
def forward(self, z):
z = F.dropout(z, self.dropout)
adj = self.activation(torch.matmul(z, z.t()))
return adj
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
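# The decoder above reconstructs an adjacency matrix from node embeddings by
# an inner product. A minimal sketch of the math (illustrative names):
#
#     z = F.dropout(z, p=0.1)            # z: (N, d) node embeddings
#     adj = torch.sigmoid(z @ z.t())     # (N, N); entry (i, j) ~ edge probability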
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.native_dropout.default(arg0_1, 0.1, True)
del arg0_1
buf1 = buf0[0]
del buf0
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf3)
del buf1
buf4 = buf3
del buf3
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(16)](buf4, 16, XBLOCK=16, num_warps
=1, num_stages=1)
return buf4,
class InnerProductDecoderNew(nn.Module):
def __init__(self, activation=torch.sigmoid, dropout=0.1):
super().__init__()
self.dropout = dropout
self.activation = activation
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| leiyu-thunder/gae_dgl | InnerProductDecoder | false | 7,069 | ["Apache-2.0"] | 1 | c743acc96e24c4ca3ae72d08956381f302b373bd | https://github.com/leiyu-thunder/gae_dgl/tree/c743acc96e24c4ca3ae72d08956381f302b373bd | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, activation=torch.sigmoid, dropout=0.1):
super().__init__()
self.dropout = dropout
self.activation = activation
def forward(self, z):
z = F.dropout(z, self.dropout)
adj = self.activation(torch.matmul(z, z.t()))
return adj
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
|
NN_logsoftmax | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pr/cprthrqz6iotcmrjfcrj7taqntzxisdcjtr54gsuz2ck2kf6kbsr.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
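# The kernel above fuses the linear layer's bias add with ReLU and, in the
# same pass, writes out the boolean mask (activation <= 0) that
# aten.threshold_backward uses to zero gradients through the ReLU. A rough
# eager-mode sketch (illustrative names):
#
#     out = torch.relu(x + bias)
#     mask = out <= 0   # saved for backward: grad_in = grad_out.masked_fill(mask, 0)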
# kernel path: runs/run_shard_4/inductor_cache/bg/cbg32drchyezvbfwshguvyopixmzwi2llws7xkhvpdruis76tr2t.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/oo/coo5rivaroinv27r7to5gs4jb7ce7itar6epfsastoa2ig6tj65k.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
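# Taken together, the two _log_softmax kernels implement the numerically
# stable decomposition of log-softmax over dim=1:
#
#     log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x))))
#
# Kernel 1 subtracts the per-row max; kernel 2 subtracts the log-sum-exp of
# the shifted values. Shifting by the max first keeps exp() from overflowing.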
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 1), (1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf2)
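        # addmm fuses the fc2 bias into the matmul: out = beta * input +
        # alpha * (mat1 @ mat2), so buf2 = primals_5 + buf1_view @ primals_4_view.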
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1), (1, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class NN_logsoftmax(nn.Module):
"""Build a new class for the network you want to run, returning log
softmax"""
def set_parameters(self, initializers):
"""Set the parameter values obtained from vanilla NN as initializers"""
with torch.no_grad():
self.fc1.weight.data = torch.from_numpy(initializers[0].copy())
self.fc1.bias.data = torch.from_numpy(initializers[1].copy())
self.fc2.weight.data = torch.from_numpy(initializers[2].copy())
self.fc2.bias.data = torch.from_numpy(initializers[3].copy())
"""Single layer network with layer_size nodes"""
def __init__(self, d, layer_size, num_classes):
super(NN_logsoftmax, self).__init__()
self.fc1 = nn.Linear(d, layer_size)
self.fc2 = nn.Linear(layer_size, num_classes)
"""Return the log softmax values for each of the classes"""
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d': 4, 'layer_size': 1, 'num_classes': 4}]
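# A minimal usage sketch for the network above (assumed shapes, not part of
# the original snippet):
#
#     net = NN_logsoftmax(d=4, layer_size=1, num_classes=4)
#     log_probs = net(torch.rand(4, 4, 4, 4))   # log-probabilities along dim=1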
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_2, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 1), (
1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 1), (1, 1), 0), buf4, primals_4, buf5
class NN_logsoftmaxNew(nn.Module):
"""Build a new class for the network you want to run, returning log
softmax"""
def set_parameters(self, initializers):
"""Set the parameter values obtained from vanilla NN as initializers"""
with torch.no_grad():
self.fc1.weight.data = torch.from_numpy(initializers[0].copy())
self.fc1.bias.data = torch.from_numpy(initializers[1].copy())
self.fc2.weight.data = torch.from_numpy(initializers[2].copy())
self.fc2.bias.data = torch.from_numpy(initializers[3].copy())
"""Single layer network with layer_size nodes"""
def __init__(self, d, layer_size, num_classes):
super(NN_logsoftmaxNew, self).__init__()
self.fc1 = nn.Linear(d, layer_size)
self.fc2 = nn.Linear(layer_size, num_classes)
"""Return the log softmax values for each of the classes"""
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| laravomfell/tvd_loss | NN_logsoftmax | false | 7,070 | ["MIT"] | 1 | b30a925f95985a03ff70bfa40a6ec3662432779d | https://github.com/laravomfell/tvd_loss/tree/b30a925f95985a03ff70bfa40a6ec3662432779d | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""Build a new class for the network you want to run, returning log
softmax"""
def set_parameters(self, initializers):
"""Set the parameter values obtained from vanilla NN as initializers"""
with torch.no_grad():
self.fc1.weight.data = torch.from_numpy(initializers[0].copy())
self.fc1.bias.data = torch.from_numpy(initializers[1].copy())
self.fc2.weight.data = torch.from_numpy(initializers[2].copy())
self.fc2.bias.data = torch.from_numpy(initializers[3].copy())
"""Single layer network with layer_size nodes"""
def __init__(self, d, layer_size, num_classes):
super().__init__()
self.fc1 = nn.Linear(d, layer_size)
self.fc2 = nn.Linear(layer_size, num_classes)
"""Return the log softmax values for each of the classes"""
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 1, 4]
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [Z_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Z_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py
# Topologically Sorted Source Nodes: [mu_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# mu_1 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/um/cum2fesw33hwazpaeuen4zbyic2hn2gqgkncqznvseppsa5ejb6s.py
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
# Source node to ATen node mapping:
# std => exp
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_8,), kwargs = {})
triton_poi_fused_exp_2 = async_compile.triton('triton_poi_fused_exp_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [Z_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [Z_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [mu_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((1, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
triton_poi_fused_exp_2.run(primals_8, buf6, 1, grid=grid(1), stream=stream0)
del primals_8
return (buf5, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H)
self.dec2 = nn.Linear(in_features=self.H, out_features=self.H)
self.dec3 = nn.Linear(in_features=self.H, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, Z):
Z = self.dec1(Z)
Z = nn.functional.relu(Z)
Z = self.dec2(Z)
Z = nn.functional.relu(Z)
mu = self.dec3(Z)
mu = nn.functional.tanh(mu)
std = torch.exp(self.log_scale)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4, 'H': 4, 'D': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_exp_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Single-program scalar kernel: only element 0 is read and written, so no
    # offset or mask arithmetic is needed.
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((1,), (1,), torch.float32)
triton_poi_fused_exp_2[grid(1)](primals_8, buf6, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del primals_8
return buf5, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, buf6, primals_6, buf7, primals_4, buf8
class DecoderNew(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H)
self.dec2 = nn.Linear(in_features=self.H, out_features=self.H)
self.dec3 = nn.Linear(in_features=self.H, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, input_0):
primals_8 = self.log_scale
primals_1 = self.dec1.weight
primals_2 = self.dec1.bias
primals_4 = self.dec2.weight
primals_5 = self.dec2.bias
primals_6 = self.dec3.weight
primals_7 = self.dec3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
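# Usage sketch (illustrative): DecoderNew keeps Decoder's module interface but
# routes forward() through the compiled `call`, which allocates on cuda:0, so
# a CUDA device is required:
#   dec = DecoderNew(M=4, H=4, D=4).cuda()
#   mu, std = dec(torch.rand(4, 4, 4, 4, device='cuda'))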
| le0x99/deep-generative-modeling | Decoder | false | 7,071 | [
"MIT"
] | 1 | 40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | https://github.com/le0x99/deep-generative-modeling/tree/40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H)
self.dec2 = nn.Linear(in_features=self.H, out_features=self.H)
self.dec3 = nn.Linear(in_features=self.H, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, Z):
Z = self.dec1(Z)
Z = nn.functional.relu(Z)
Z = self.dec2(Z)
Z = nn.functional.relu(Z)
mu = self.dec3(Z)
mu = nn.functional.tanh(mu)
std = torch.exp(self.log_scale)
return mu, std
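# Usage sketch (illustrative; shapes follow get_inputs()/get_init_inputs() below):
#   model = Model(M=4, H=4, D=4)
#   mu, std = model(torch.rand(4, 4, 4, 4))
#   # mu is tanh-squashed into (-1, 1); std = exp(log_scale) is a learned scalar.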
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3ucft62zfsdldfp5jo3ilwlzjqnalgzyy5tm2e2sonktu7eernk.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3600) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
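# Reference sketch (illustrative, not emitted by Inductor): the convolution
# itself runs through extern_kernels.convolution with bias=None (see `call`
# below), and the kernel above is the fused pointwise epilogue applied in
# place on the conv output:
#   buf = conv2d(x, weight, bias=None)
#   buf = relu(buf + bias.view(1, -1, 1, 1))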
# kernel path: runs/run_shard_4/inductor_cache/v3/cv3g6yelvgzwyu3llhoytuo5q7ieh4is6hu4eainxjbiiqaknnwb.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 401408
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3136) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gg/cggecmyxgs77o5tqjqxnyucpmbtlvs2jzafuuusmwxnflcxxjzqi.py
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# x_1 => relu_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 28
x1 = (xindex // 28)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (56 + (2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (57 + (2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp18, None)
''', device_str='cuda')
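# Reference sketch (illustrative): the kernel above fuses a 2x2/stride-2 max
# pool with ReLU, loading the four window elements per output position and
# recording the argmax as an int8 offset in {0, 1, 2, 3} for the backward pass.
# Eager equivalent, assuming a contiguous NCHW input `x`:
#   out, idx = torch.nn.functional.max_pool2d(x, 2, return_indices=True)
#   out = torch.relu(out)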
# kernel path: runs/run_shard_4/inductor_cache/ar/carehaf65zlhitys3iauvqzlcwbhk4nbxzybckcleuv3qwzs73un.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 576) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ua/cuan73uvblihzk5ct3zyws3uv6vww7zp5c5rnangiz2f6h2vwjln.py
# Topologically Sorted Source Nodes: [max_pool2d_1, x_3], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# x_3 => relu_2
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp18, None)
tl.store(out_ptr2 + (x2), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/be/cbeepizysguw7wfu6klak2ddhbaxxwroegtezj4sjfcstphkl4ji.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_6 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/56/c56kom4zmmeyue5st3iybolzrrahjuwzi6zbcjdoi3n7rwyjp2ih.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_6 = async_compile.triton('triton_per_fused__log_softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
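# A minimal reference sketch (not part of the generated module): the fused
# kernel above computes the numerically stable log-softmax used by
# aten._log_softmax, i.e. (x - max(x)) - log(sum(exp(x - max(x)))) over dim 1.
# The helper name below is illustrative only.
def _log_softmax_reference(x):
    # x: (N, C) float tensor; matches the kernel's per-row reduction.
    m = x.max(dim=1, keepdim=True).values
    z = x - m
    return z - z.exp().sum(dim=1, keepdim=True).log()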
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (256, 576), (576, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (10, 256), (256, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 460800, grid=grid(460800), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 56, 56), (100352, 3136, 56, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 401408, grid=grid(401408), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1), torch.int8)
buf5 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_2.run(buf3, buf4, buf5, 100352, grid=grid(100352), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 24, 24), (36864, 576, 24, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf7, primals_7, 147456, grid=grid(147456), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.int8)
buf9 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.float32)
buf16 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [max_pool2d_1, x_3], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4.run(buf7, buf8, buf9, buf16, 36864, grid=grid(36864), stream=stream0)
buf10 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (64, 576), (576, 1), 0), reinterpret_tensor(primals_8, (576, 256), (1, 576), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 16384, grid=grid(16384), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_6.run(buf12, buf15, 64, 10, grid=grid(64), stream=stream0)
del buf12
return (buf15, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf4, buf5, buf7, buf8, reinterpret_tensor(buf9, (64, 576), (576, 1), 0), buf11, buf15, primals_10, primals_8, buf16, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 576), (576, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
self.conv3 = nn.Conv2d(32, 64, kernel_size=5)
self.fc1 = nn.Linear(3 * 3 * 64, 256)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu(F.max_pool2d(self.conv3(x), 2))
x = F.dropout(x, p=0.5, training=self.training)
x = x.view(-1, 3 * 3 * 64)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
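# Usage sketch (illustrative): note that x.view(-1, 3 * 3 * 64) reshapes the
# (4, 64, 12, 12) activation into 64 rows of 576 features, so the output has
# 64 rows rather than the input batch size of 4:
#   net = CNN()
#   log_probs = net(torch.rand(4, 1, 64, 64))  # -> (64, 10)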
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3136 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 28
x1 = xindex // 28
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 112 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 112 * x1), None,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (56 + 2 * x0 + 112 * x1), None,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (57 + 2 * x0 + 112 * x1), None,
        eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 576 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None,
        eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp18, None)
tl.store(out_ptr2 + x2, tmp20, None)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (256, 576), (576, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (10, 256), (256, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(460800)](buf1, primals_2,
460800, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 56, 56), (100352, 3136, 56, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(401408)](buf3, primals_5,
401408, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_2[grid(100352)](buf3,
buf4, buf5, 100352, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 24, 24), (36864, 576, 24, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(147456)](buf7, primals_7,
147456, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.int8)
buf9 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.float32)
buf16 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.bool)
        triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4[
            grid(36864)](buf7, buf8, buf9, buf16, 36864, XBLOCK=512,
            num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 576), (576, 1), 0),
reinterpret_tensor(primals_8, (576, 256), (1, 576), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(16384)](buf11, primals_9, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(64)](buf12, buf15, 64, 10,
XBLOCK=8, num_warps=2, num_stages=1)
del buf12
    return (buf15, primals_1, primals_3, primals_4, primals_6, buf1, buf3,
        buf4, buf5, buf7, buf8,
        reinterpret_tensor(buf9, (64, 576), (576, 1), 0),
        buf11, buf15, primals_10, primals_8, buf16)
class CNNNew(nn.Module):
def __init__(self):
super(CNNNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
self.conv3 = nn.Conv2d(32, 64, kernel_size=5)
self.fc1 = nn.Linear(3 * 3 * 64, 256)
self.fc2 = nn.Linear(256, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
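# Usage sketch (illustrative): CNNNew mirrors CNN but dispatches through the
# compiled `call`, which allocates on cuda:0, so it requires a CUDA device:
#   net = CNNNew().cuda()
#   log_probs = net(torch.rand(4, 1, 64, 64, device='cuda'))  # -> (64, 10)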
| krishnachaitanya7/Manifolk | CNN | false | 7,072 | [
"MIT"
] | 1 | 779a044af8ce82c913957ce341b9c9f2f1d1e815 | https://github.com/krishnachaitanya7/Manifolk/tree/779a044af8ce82c913957ce341b9c9f2f1d1e815 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
self.conv3 = nn.Conv2d(32, 64, kernel_size=5)
self.fc1 = nn.Linear(3 * 3 * 64, 256)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu(F.max_pool2d(self.conv3(x), 2))
x = F.dropout(x, p=0.5, training=self.training)
x = x.view(-1, 3 * 3 * 64)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
NetPart1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ds/cdsa53dqtl7jesxfa6gnrq7ulnchr6435id2q4mimzmffnpas7s7.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 11808768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3844) % 768
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/s6/cs6oowloxihsa2iqf677aggor4dgxx3q4fq7upsksc3m7rf4j6xb.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 921600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3600) % 64
x0 = xindex % 3600
x4 = (xindex // 3600)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6v/c6votzbefolf4fzzmnc4okxekuuh37ajyccnmrjo4ze3cvigljbw.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_4 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = (xindex // 30) % 30
x2 = (xindex // 900)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (768, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (768, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 768, 3, 3), (6912, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 768, 62, 62), (2952192, 3844, 62, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 11808768, grid=grid(11808768), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 60, 60), (230400, 3600, 60, 1))
buf3 = empty_strided_cuda((4, 64, 60, 60), (231424, 3616, 60, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_5, buf3, 921600, grid=grid(921600), stream=stream0)
del buf2
del primals_5
buf4 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1), torch.int8)
buf5 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 230400, grid=grid(230400), stream=stream0)
return (reinterpret_tensor(buf5, (4, 57600), (57600, 1), 0), primals_1, primals_3, primals_4, buf1, buf3, buf4, )
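# Note (illustrative): torch.flatten(x, 1) from the source model needs no
# kernel here; buf5 is contiguous, so it is returned as a zero-copy
# reinterpret_tensor view of shape (4, 57600).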
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((768, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 768, 3, 3), (6912, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class NetPart1(nn.Module):
def __init__(self):
super(NetPart1, self).__init__()
d1 = 768
self.conv1 = nn.Conv2d(1, d1, 3, 1)
self.conv2 = nn.Conv2d(d1, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
return x
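# Shape walk-through (illustrative), for the (4, 1, 64, 64) input below:
#   conv1 (3x3, no padding) -> (4, 768, 62, 62)
#   conv2 (3x3, no padding) -> (4, 64, 60, 60)
#   max_pool2d(2)           -> (4, 64, 30, 30)
#   flatten                 -> (4, 57600)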
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3844 % 768
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 64
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (768, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 768, 3, 3), (6912, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 768, 62, 62), (2952192, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(11808768)](buf1, primals_2,
11808768, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 60, 60), (230400, 3600, 60, 1))
buf3 = empty_strided_cuda((4, 64, 60, 60), (231424, 3616, 60, 1),
torch.float32)
triton_poi_fused_convolution_relu_1[grid(921600)](buf2, primals_5,
buf3, 921600, XBLOCK=1024, num_warps=4, num_stages=1)
del buf2
del primals_5
buf4 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(230400)](buf3, buf4,
buf5, 230400, XBLOCK=512, num_warps=8, num_stages=1)
return reinterpret_tensor(buf5, (4, 57600), (57600, 1), 0
), primals_1, primals_3, primals_4, buf1, buf3, buf4
class NetPart1New(nn.Module):
def __init__(self):
super(NetPart1New, self).__init__()
d1 = 768
self.conv1 = nn.Conv2d(1, d1, 3, 1)
self.conv2 = nn.Conv2d(d1, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| lancelee82/necklace | NetPart1 | false | 7,073 | [
"MIT"
] | 1 | 7a7cfbc05284c1a7ae0a923c8b9a3efdd0037579 | https://github.com/lancelee82/necklace/tree/7a7cfbc05284c1a7ae0a923c8b9a3efdd0037579 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self):
super().__init__()
d1 = 768
self.conv1 = nn.Conv2d(1, d1, 3, 1)
self.conv2 = nn.Conv2d(d1, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qj/cqjuhvr76bx6bqynpw2c4jacd7opgnodfbhppcntdkuiscw2isyj.py
# Topologically Sorted Source Nodes: [truediv, std], Original ATen: [aten.div, aten.exp]
# Source node to ATen node mapping:
# std => exp
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_1, 2), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
triton_poi_fused_div_exp_1 = async_compile.triton('triton_poi_fused_div_exp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, std], Original ATen: [aten.div, aten.exp]
triton_poi_fused_div_exp_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf4, (64, 4), (8, 1), 0), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H)
self.enc2 = nn.Linear(in_features=self.H, out_features=self.H)
self.enc3 = nn.Linear(in_features=self.H, out_features=self.M * 2)
def forward(self, x):
x = self.enc1(x)
x = nn.functional.relu(x)
x = self.enc2(x)
x = nn.functional.relu(x)
x = self.enc3(x)
x = x.view(-1, 2, self.M)
mu = x[:, 0, :]
log_var = x[:, 1, :]
std = torch.exp(log_var / 2)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D': 4, 'H': 4, 'M': 4}]
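# Minimal usage sketch (illustrative, not from the original source): the
# (mu, std) pair returned by the encoder is typically consumed via the
# reparameterization trick when training a VAE; names here are hypothetical.
def reparameterize_example():
    enc = Encoder(D=4, H=4, M=4)
    mu, std = enc(torch.rand([4, 4, 4, 4]))  # each of shape (64, 4)
    eps = torch.randn_like(std)              # unit Gaussian noise
    z = mu + eps * std                       # differentiable latent sample
    return z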
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_div_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
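    # Computes std = exp(log_var / 2): the offset 4 + x0 + 8 * x1 selects the
    # second half (the log-variance row) of each (2, M) block emitted by enc3.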
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_div_exp_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf4, (64, 4), (8, 1), 0
), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class EncoderNew(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H)
self.enc2 = nn.Linear(in_features=self.H, out_features=self.H)
self.enc3 = nn.Linear(in_features=self.H, out_features=self.M * 2)
def forward(self, input_0):
primals_1 = self.enc1.weight
primals_2 = self.enc1.bias
primals_4 = self.enc2.weight
primals_5 = self.enc2.bias
primals_6 = self.enc3.weight
primals_7 = self.enc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| le0x99/deep-generative-modeling | Encoder | false | 7,074 | [
"MIT"
] | 1 | 40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | https://github.com/le0x99/deep-generative-modeling/tree/40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H)
self.enc2 = nn.Linear(in_features=self.H, out_features=self.H)
self.enc3 = nn.Linear(in_features=self.H, out_features=self.M * 2)
def forward(self, x):
x = self.enc1(x)
x = nn.functional.relu(x)
x = self.enc2(x)
x = nn.functional.relu(x)
x = self.enc3(x)
x = x.view(-1, 2, self.M)
mu = x[:, 0, :]
log_var = x[:, 1, :]
std = torch.exp(log_var / 2)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Invertible1x1Conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2d/c2dur4cqvjdmojoen7nz625qbl3hymirj3ihsdj2scx7nk2xzzx5.py
# Topologically Sorted Source Nodes: [logdet, log_det_W], Original ATen: [aten.eq, aten.mul]
# Source node to ATen node mapping:
# log_det_W => mul
# logdet => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%getitem, -1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 16), kwargs = {})
triton_poi_fused_eq_mul_0 = async_compile.triton('triton_poi_fused_eq_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_mul_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
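    # Tail of torch.logdet fused into one kernel: in_ptr0 holds the sign from
    # _linalg_slogdet and in_out_ptr0 the log-abs-determinant; a sign of -1.0
    # produces NaN (logdet of a negative-determinant matrix), and the result
    # is scaled by batch_size * n_of_groups = 4 * 4 = 16.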
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp4 = tl.load(in_out_ptr0 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = -1.0
tmp3 = tmp1 == tmp2
tmp6 = float("nan")
tmp7 = tl.where(tmp3, tmp6, tmp5)
tmp8 = 16.0
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp3, None)
tl.store(in_out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [logdet], Original ATen: [aten._linalg_slogdet]
buf0 = torch.ops.aten._linalg_slogdet.default(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0))
buf1 = buf0[0]
buf2 = buf0[1]
buf3 = buf0[2]
buf4 = buf0[3]
del buf0
buf5 = empty_strided_cuda((1, ), (1, ), torch.bool)
buf7 = reinterpret_tensor(buf2, (), (), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [logdet, log_det_W], Original ATen: [aten.eq, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_eq_mul_0.run(buf7, buf1, buf5, 1, grid=grid(1), stream=stream0)
del buf1
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
return (buf6, buf7, primals_1, primals_2, buf3, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data
import torch.nn
class Invertible1x1Conv(torch.nn.Module):
"""
    The layer outputs both the convolution and the log determinant
    of its weight matrix. The `infer` method instead applies the
    convolution with the inverse of the weight matrix.
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=
0, bias=False)
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
W = W.contiguous()
self.conv.weight.data = W
def forward(self, z):
batch_size, _group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).
float()).squeeze()
z = self.conv(z)
return z, log_det_W
def infer(self, z):
_batch_size, _group_size, _n_of_groups = z.size()
W = self.conv.weight.squeeze()
if not hasattr(self, 'W_inverse'):
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor' or z.type(
) == 'torch.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'c': 4}]
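# Illustrative consistency check (not part of the original source): the
# log_det_W returned by forward should equal batch_size * n_of_groups times
# the log-determinant of the 1x1 convolution weight.
def logdet_check_example():
    layer = Invertible1x1Conv(c=4)
    z = torch.rand([4, 4, 4])
    _, log_det_W = layer(z)
    W = layer.conv.weight.squeeze()
    assert torch.allclose(log_det_W, 4 * 4 * torch.logdet(W), atol=1e-5)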
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eq_mul_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp4 = tl.load(in_out_ptr0 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = -1.0
tmp3 = tmp1 == tmp2
tmp6 = float('nan')
tmp7 = tl.where(tmp3, tmp6, tmp5)
tmp8 = 16.0
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp3, None)
tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp9, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._linalg_slogdet.default(reinterpret_tensor(
primals_2, (1, 4, 4), (16, 4, 1), 0))
buf1 = buf0[0]
buf2 = buf0[1]
buf3 = buf0[2]
buf4 = buf0[3]
del buf0
buf5 = empty_strided_cuda((1,), (1,), torch.bool)
buf7 = reinterpret_tensor(buf2, (), (), 0)
del buf2
get_raw_stream(0)
triton_poi_fused_eq_mul_0[grid(1)](buf7, buf1, buf5, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del buf1
buf6 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
return buf6, buf7, primals_1, primals_2, buf3, buf4, buf5
class Invertible1x1ConvNew(torch.nn.Module):
"""
    The layer outputs both the convolution and the log determinant
    of its weight matrix. The `infer` method instead applies the
    convolution with the inverse of the weight matrix.
"""
def __init__(self, c):
super(Invertible1x1ConvNew, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=
0, bias=False)
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
W = W.contiguous()
self.conv.weight.data = W
def infer(self, z):
_batch_size, _group_size, _n_of_groups = z.size()
W = self.conv.weight.squeeze()
if not hasattr(self, 'W_inverse'):
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor' or z.type(
) == 'torch.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| leo0519/TensorRT | Invertible1x1Conv | false | 7,075 | [
"Apache-2.0"
] | 1 | 498dcb009fe4c2dedbe9c61044d3de4f3c04a41b | https://github.com/leo0519/TensorRT/tree/498dcb009fe4c2dedbe9c61044d3de4f3c04a41b | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data
import torch.nn
class Model(torch.nn.Module):
"""
    The layer outputs both the convolution and the log determinant
    of its weight matrix. The `infer` method instead applies the
    convolution with the inverse of the weight matrix.
"""
def __init__(self, c):
super().__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=
0, bias=False)
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
W = W.contiguous()
self.conv.weight.data = W
def forward(self, z):
batch_size, _group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).
float()).squeeze()
z = self.conv(z)
return z, log_det_W
def infer(self, z):
_batch_size, _group_size, _n_of_groups = z.size()
W = self.conv.weight.squeeze()
if not hasattr(self, 'W_inverse'):
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor' or z.type(
) == 'torch.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
NN_softmax | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pr/cprthrqz6iotcmrjfcrj7taqntzxisdcjtr54gsuz2ck2kf6kbsr.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
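    # Numerically stable softmax, pass 1: compute the max over dim=1 (the four
    # values at offsets 0/16/32/48 within each 64-element block) and subtract
    # it before exponentiating, so exp never overflows.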
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 1), (1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1), (1, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class NN_logsoftmax(nn.Module):
"""Build a new class for the network you want to run, returning log
softmax"""
def set_parameters(self, initializers):
"""Set the parameter values obtained from vanilla NN as initializers"""
with torch.no_grad():
self.fc1.weight.data = torch.from_numpy(initializers[0].copy())
self.fc1.bias.data = torch.from_numpy(initializers[1].copy())
self.fc2.weight.data = torch.from_numpy(initializers[2].copy())
self.fc2.bias.data = torch.from_numpy(initializers[3].copy())
"""Single layer network with layer_size nodes"""
def __init__(self, d, layer_size, num_classes):
super(NN_logsoftmax, self).__init__()
self.fc1 = nn.Linear(d, layer_size)
self.fc2 = nn.Linear(layer_size, num_classes)
"""Return the log softmax values for each of the classes"""
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
class NN_softmax(NN_logsoftmax):
"""Build a new class for the network you want to run, returning non-log
softmax"""
"""Return the softmax values for each of the classes"""
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d': 4, 'layer_size': 1, 'num_classes': 4}]
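# Illustrative consistency check (not part of the original source): since the
# two classes differ only in the final activation, the log of NN_softmax's
# output should match F.log_softmax applied to the same logits.
def compare_softmax_example():
    net = NN_softmax(d=4, layer_size=1, num_classes=4)
    x = torch.rand([4, 4, 4, 4])
    logits = net.fc2(F.relu(net.fc1(x)))
    assert torch.allclose(torch.log(net(x)),
                          F.log_softmax(logits, dim=1), atol=1e-5)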
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_2, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 1), (
1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 1), (1, 1), 0), buf4, primals_4, buf5
class NN_logsoftmax(nn.Module):
"""Build a new class for the network you want to run, returning log
softmax"""
def set_parameters(self, initializers):
"""Set the parameter values obtained from vanilla NN as initializers"""
with torch.no_grad():
self.fc1.weight.data = torch.from_numpy(initializers[0].copy())
self.fc1.bias.data = torch.from_numpy(initializers[1].copy())
self.fc2.weight.data = torch.from_numpy(initializers[2].copy())
self.fc2.bias.data = torch.from_numpy(initializers[3].copy())
"""Single layer network with layer_size nodes"""
def __init__(self, d, layer_size, num_classes):
super(NN_logsoftmax, self).__init__()
self.fc1 = nn.Linear(d, layer_size)
self.fc2 = nn.Linear(layer_size, num_classes)
"""Return the log softmax values for each of the classes"""
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
class NN_softmaxNew(NN_logsoftmax):
"""Build a new class for the network you want to run, returning non-log
softmax"""
"""Return the softmax values for each of the classes"""
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| laravomfell/tvd_loss | NN_softmax | false | 7,076 | [
"MIT"
] | 1 | b30a925f95985a03ff70bfa40a6ec3662432779d | https://github.com/laravomfell/tvd_loss/tree/b30a925f95985a03ff70bfa40a6ec3662432779d | import torch
from torch import nn
import torch.nn.functional as F
class NN_logsoftmax(nn.Module):
"""Build a new class for the network you want to run, returning log
softmax"""
def set_parameters(self, initializers):
"""Set the parameter values obtained from vanilla NN as initializers"""
with torch.no_grad():
self.fc1.weight.data = torch.from_numpy(initializers[0].copy())
self.fc1.bias.data = torch.from_numpy(initializers[1].copy())
self.fc2.weight.data = torch.from_numpy(initializers[2].copy())
self.fc2.bias.data = torch.from_numpy(initializers[3].copy())
"""Single layer network with layer_size nodes"""
def __init__(self, d, layer_size, num_classes):
super().__init__()
self.fc1 = nn.Linear(d, layer_size)
self.fc2 = nn.Linear(layer_size, num_classes)
"""Return the log softmax values for each of the classes"""
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
class Model(NN_logsoftmax):
"""Build a new class for the network you want to run, returning non-log
softmax"""
"""Return the softmax values for each of the classes"""
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 1, 4]
|
_ShiftedSoftPlus | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ur/curfo5zsg7v2odmq2ylzriguhfqc6nwunbfzegsawhqov3yup6aa.py
# Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub]
# Source node to ATen node mapping:
# softplus => exp, gt, log1p, where
# sub => sub
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, 0.6931471805599453), kwargs = {})
triton_poi_fused_softplus_sub_0 = async_compile.triton('triton_poi_fused_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
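    # Fused shifted softplus: softplus(x) is evaluated as log1p(exp(x)) for
    # x <= 20 and passed through unchanged above that threshold (matching
    # nn.Softplus), then log(2) ~= 0.6931471805599453 is subtracted so that
    # the activation is exactly zero at x = 0.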
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471805599453
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.jit
import torch.nn.functional
import torch.nn
class _ShiftedSoftPlus(torch.nn.Module):
"""
Shifted softplus as defined in SchNet, NeurIPS 2017.
    :param beta: value for a more general softplus, default = 1
    :param threshold: inputs above this value are treated as linear, default = 20
"""
_log2: 'float'
def __init__(self, beta=1, threshold=20):
super().__init__()
self.softplus = torch.nn.Softplus(beta=beta, threshold=threshold)
self._log2 = math.log(2.0)
def forward(self, x):
"""
Evaluate shifted softplus
:param x: torch.Tensor, input
:return: torch.Tensor, ssp(x)
"""
return self.softplus(x) - self._log2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.jit
import torch.nn.functional
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471805599453
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class _ShiftedSoftPlusNew(torch.nn.Module):
"""
Shifted softplus as defined in SchNet, NeurIPS 2017.
    :param beta: value for a more general softplus, default = 1
    :param threshold: inputs above this value are treated as linear, default = 20
"""
_log2: 'float'
def __init__(self, beta=1, threshold=20):
super().__init__()
self.softplus = torch.nn.Softplus(beta=beta, threshold=threshold)
self._log2 = math.log(2.0)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| leoil/nequip | _ShiftedSoftPlus | false | 7,077 | [
"MIT"
] | 1 | 83b888797025c94b9963a508bc213a7c98da5bcb | https://github.com/leoil/nequip/tree/83b888797025c94b9963a508bc213a7c98da5bcb | import math
import torch
import torch.jit
import torch.nn.functional
import torch.nn
class Model(torch.nn.Module):
"""
Shifted softplus as defined in SchNet, NeurIPS 2017.
    :param beta: value for a more general softplus, default = 1
    :param threshold: inputs above this value are treated as linear, default = 20
"""
_log2: 'float'
def __init__(self, beta=1, threshold=20):
super().__init__()
self.softplus = torch.nn.Softplus(beta=beta, threshold=threshold)
self._log2 = math.log(2.0)
def forward(self, x):
"""
Evaluate shifted softplus
:param x: torch.Tensor, input
:return: torch.Tensor, ssp(x)
"""
return self.softplus(x) - self._log2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
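# Note: the _ShiftedSoftPlus record above relies on two facts worth checking —
# ssp(0) = log(1 + e^0) - log 2 = 0, and the kernel's threshold branch (return
# x unchanged when x > 20) is accurate because log(1 + e^x) - x = log(1 + e^-x)
# is below 1e-8 there. A minimal eager sanity-check sketch (plain PyTorch, not
# part of the dataset record; the tolerances are illustrative choices):
import math
import torch
zero = torch.nn.functional.softplus(torch.zeros(1)) - math.log(2.0)
assert torch.allclose(zero, torch.zeros(1), atol=1e-6)  # ssp(0) == 0
x = torch.linspace(-5.0, 25.0, 100)
ref = torch.nn.functional.softplus(x) - math.log(2.0)
# Reproduce the fused kernel's branch: where(x > 20, x, log1p(exp(x))) - log 2.
fused = torch.where(x > 20.0, x, torch.log1p(torch.exp(x))) - math.log(2.0)
assert torch.allclose(ref, fused, atol=1e-6)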
BesselBasis | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zc/czcwoefxcbi23dqxnhhuymitmc67fa27uifwflhbdmygvxhk2ab5.py
# Topologically Sorted Source Nodes: [mul, truediv, numerator, truediv_1, mul_1], Original ATen: [aten.mul, aten.div, aten.sin]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# numerator => sin
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4.0), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sin, %unsqueeze), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 0.5), kwargs = {})
triton_poi_fused_div_mul_sin_0 = async_compile.triton('triton_poi_fused_div_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.25
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tmp6 = tmp5 / tmp1
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (8, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 8), (512, 128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, numerator, truediv_1, mul_1], Original ATen: [aten.mul, aten.div, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_sin_0.run(primals_1, primals_2, buf0, 2048, grid=grid(2048), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.jit
import torch.nn.functional
from torch import nn
import torch.nn
class BesselBasis(nn.Module):
r_max: 'float'
prefactor: 'float'
def __init__(self, r_max, num_basis=8, trainable=True):
"""Radial Bessel Basis, as proposed in DimeNet: https://arxiv.org/abs/2003.03123
Parameters
----------
r_max : float
Cutoff radius
num_basis : int
Number of Bessel Basis functions
trainable : bool
            Whether to train the :math:`n \\pi` coefficients.
"""
super(BesselBasis, self).__init__()
self.trainable = trainable
self.num_basis = num_basis
self.r_max = float(r_max)
self.prefactor = 2.0 / self.r_max
bessel_weights = torch.linspace(start=1.0, end=num_basis, steps=
num_basis) * math.pi
if self.trainable:
self.bessel_weights = nn.Parameter(bessel_weights)
else:
self.register_buffer('bessel_weights', bessel_weights)
def forward(self, x):
"""
Evaluate Bessel Basis for input x.
Parameters
----------
x : torch.Tensor
Input
"""
        numerator = torch.sin(self.bessel_weights * x.unsqueeze(-1) /
            self.r_max)
return self.prefactor * (numerator / x.unsqueeze(-1))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'r_max': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.jit
import torch.nn.functional
from torch import nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 0.25
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tmp6 = tmp5 / tmp1
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (8,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 8), (512, 128, 32, 8, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sin_0[grid(2048)](primals_1, primals_2,
buf0, 2048, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class BesselBasisNew(nn.Module):
r_max: 'float'
prefactor: 'float'
def __init__(self, r_max, num_basis=8, trainable=True):
"""Radial Bessel Basis, as proposed in DimeNet: https://arxiv.org/abs/2003.03123
Parameters
----------
r_max : float
Cutoff radius
num_basis : int
Number of Bessel Basis functions
trainable : bool
            Whether to train the :math:`n \\pi` coefficients.
"""
super(BesselBasisNew, self).__init__()
self.trainable = trainable
self.num_basis = num_basis
self.r_max = float(r_max)
self.prefactor = 2.0 / self.r_max
bessel_weights = torch.linspace(start=1.0, end=num_basis, steps=
num_basis) * math.pi
if self.trainable:
self.bessel_weights = nn.Parameter(bessel_weights)
else:
self.register_buffer('bessel_weights', bessel_weights)
def forward(self, input_0):
primals_1 = self.bessel_weights
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| leoil/nequip | BesselBasis | false | 7,078 | [
"MIT"
] | 1 | 83b888797025c94b9963a508bc213a7c98da5bcb | https://github.com/leoil/nequip/tree/83b888797025c94b9963a508bc213a7c98da5bcb | import math
import torch
import torch.jit
import torch.nn.functional
from torch import nn
import torch.nn
class Model(nn.Module):
r_max: 'float'
prefactor: 'float'
def __init__(self, r_max, num_basis=8, trainable=True):
"""Radial Bessel Basis, as proposed in DimeNet: https://arxiv.org/abs/2003.03123
Parameters
----------
r_max : float
Cutoff radius
num_basis : int
Number of Bessel Basis functions
trainable : bool
            Whether to train the :math:`n \\pi` coefficients.
"""
super().__init__()
self.trainable = trainable
self.num_basis = num_basis
self.r_max = float(r_max)
self.prefactor = 2.0 / self.r_max
bessel_weights = torch.linspace(start=1.0, end=num_basis, steps=
num_basis) * math.pi
if self.trainable:
self.bessel_weights = nn.Parameter(bessel_weights)
else:
self.register_buffer('bessel_weights', bessel_weights)
def forward(self, x):
"""
Evaluate Bessel Basis for input x.
Parameters
----------
x : torch.Tensor
Input
"""
        numerator = torch.sin(self.bessel_weights * x.unsqueeze(-1) /
            self.r_max)
return self.prefactor * (numerator / x.unsqueeze(-1))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
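# Note: the BesselBasis record above evaluates b_n(x) = (2 / r_max) *
# sin(n * pi * x / r_max) / x for n = 1..num_basis; the fused kernel folds the
# r_max = 4 constants into 0.25 (= 1 / r_max) and 0.5 (= prefactor = 2 / r_max).
# A minimal eager sketch of the same formula (plain PyTorch, not part of the
# dataset record):
import math
import torch
r_max, num_basis = 4.0, 8
n_pi = torch.linspace(1.0, num_basis, num_basis) * math.pi
x = torch.rand(4, 4, 4, 4)
# Broadcast over a trailing basis axis, exactly as x.unsqueeze(-1) does above.
basis = (2.0 / r_max) * torch.sin(n_pi * x.unsqueeze(-1) / r_max) / x.unsqueeze(-1)
assert basis.shape == (4, 4, 4, 4, num_basis)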
Decoder3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [Z_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Z_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jm/cjmjqfjv2ijia2nagoscrnh2gu57uuxti5zfjtxbtxgqzk2qxxoh.py
# Topologically Sorted Source Nodes: [Z_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Z_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ut/cutpg7prpn24s3lkqb6t44exfz2pu53buvzjvhbzb6tg67hufapp.py
# Topologically Sorted Source Nodes: [Z_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Z_5 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dz/cdzh2oi7rw6yhfcc6xr56aipmptbeerrkvpvz6bngz7przhadvfl.py
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
# Source node to ATen node mapping:
# std => exp
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_10,), kwargs = {})
triton_poi_fused_exp_3 = async_compile.triton('triton_poi_fused_exp_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (12, 8), (8, 1))
assert_size_stride(primals_7, (12, ), (1, ))
assert_size_stride(primals_8, (4, 12), (12, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [Z_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf2 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [Z_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf9, 512, grid=grid(512), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 12), (1, 8), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf4 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [Z_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf8, 768, grid=grid(768), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 12), (12, 1), 0), reinterpret_tensor(primals_8, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((1, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
triton_poi_fused_exp_3.run(primals_10, buf7, 1, grid=grid(1), stream=stream0)
del primals_10
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(buf5, (64, 12), (12, 1), 0), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((12, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder3(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H)
self.dec2 = nn.Linear(in_features=self.H, out_features=self.H * 2)
self.dec3 = nn.Linear(in_features=self.H * 2, out_features=self.H * 3)
self.dec4 = nn.Linear(in_features=self.H * 3, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, Z):
Z = self.dec1(Z)
Z = nn.functional.relu(Z)
Z = self.dec2(Z)
Z = nn.functional.relu(Z)
Z = self.dec3(Z)
Z = nn.functional.relu(Z)
mu = self.dec4(Z)
std = torch.exp(self.log_scale)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4, 'H': 4, 'D': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_exp_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (12, 8), (8, 1))
assert_size_stride(primals_7, (12,), (1,))
assert_size_stride(primals_8, (4, 12), (12, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf3,
primals_5, buf9, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_6, (8, 12), (1, 8), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 12), (192, 48, 12, 1), 0)
del buf4
buf8 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(768)](buf5,
primals_7, buf8, 768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 12),
(12, 1), 0), reinterpret_tensor(primals_8, (12, 4), (1, 12), 0),
alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((1,), (1,), torch.float32)
triton_poi_fused_exp_3[grid(1)](primals_10, buf7, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del primals_10
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 8), (8, 1), 0), reinterpret_tensor(buf5, (64, 12), (12,
1), 0), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10
class Decoder3New(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H)
self.dec2 = nn.Linear(in_features=self.H, out_features=self.H * 2)
self.dec3 = nn.Linear(in_features=self.H * 2, out_features=self.H * 3)
self.dec4 = nn.Linear(in_features=self.H * 3, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, input_0):
primals_10 = self.log_scale
primals_1 = self.dec1.weight
primals_2 = self.dec1.bias
primals_4 = self.dec2.weight
primals_5 = self.dec2.bias
primals_6 = self.dec3.weight
primals_7 = self.dec3.bias
primals_8 = self.dec4.weight
primals_9 = self.dec4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
| le0x99/deep-generative-modeling | Decoder3 | false | 7,079 | [
"MIT"
] | 1 | 40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | https://github.com/le0x99/deep-generative-modeling/tree/40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, M, H, D):
super().__init__()
self.D = D
self.M = M
self.H = H
self.dec1 = nn.Linear(in_features=self.M, out_features=self.H)
self.dec2 = nn.Linear(in_features=self.H, out_features=self.H * 2)
self.dec3 = nn.Linear(in_features=self.H * 2, out_features=self.H * 3)
self.dec4 = nn.Linear(in_features=self.H * 3, out_features=self.D)
self.log_scale = nn.Parameter(torch.Tensor([0.0]))
def forward(self, Z):
Z = self.dec1(Z)
Z = nn.functional.relu(Z)
Z = self.dec2(Z)
Z = nn.functional.relu(Z)
Z = self.dec3(Z)
Z = nn.functional.relu(Z)
mu = self.dec4(Z)
std = torch.exp(self.log_scale)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
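# Note: Decoder3 above widens M -> H -> 2H -> 3H with ReLUs and projects to D,
# returning a per-element mean plus one learned scalar std = exp(log_scale), so
# std == 1.0 at initialization. A minimal eager sketch of the same stack (plain
# PyTorch with M = H = D = 4 as in the record; not the compiled module):
import torch
import torch.nn as nn
M = H = D = 4
dec = nn.Sequential(
    nn.Linear(M, H), nn.ReLU(),
    nn.Linear(H, 2 * H), nn.ReLU(),
    nn.Linear(2 * H, 3 * H), nn.ReLU(),
    nn.Linear(3 * H, D),
)
log_scale = nn.Parameter(torch.Tensor([0.0]))
mu = dec(torch.rand(4, 4, 4, M))
std = torch.exp(log_scale)
assert mu.shape == (4, 4, 4, D) and float(std) == 1.0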
GainesMul | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ef/cef3fsqnnk4ltvh4ba475r6rxd4sz2shaz4m32w6jfadxinoq77d.py
# Topologically Sorted Source Nodes: [type_1, type_2, xor, sub, type_3], Original ATen: [aten._to_copy, aten.bitwise_xor, aten.rsub]
# Source node to ATen node mapping:
# sub => sub
# type_1 => convert_element_type
# type_2 => convert_element_type_1
# type_3 => convert_element_type_2
# xor => bitwise_xor
# Graph fragment:
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.int8), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg1_1, torch.int8), kwargs = {})
# %bitwise_xor : [num_users=1] = call_function[target=torch.ops.aten.bitwise_xor.Tensor](args = (%convert_element_type, %convert_element_type_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %bitwise_xor), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sub, torch.float32), kwargs = {})
triton_poi_fused__to_copy_bitwise_xor_rsub_0 = async_compile.triton('triton_poi_fused__to_copy_bitwise_xor_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_bitwise_xor_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_bitwise_xor_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tmp0.to(tl.int8)
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp1 ^ tmp3
tmp5 = tl.full([1], 1, tl.int8)
tmp6 = tmp5 - tmp4
tmp7 = tmp6.to(tl.float32)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [type_1, type_2, xor, sub, type_3], Original ATen: [aten._to_copy, aten.bitwise_xor, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_bitwise_xor_rsub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class GainesMul(torch.nn.Module):
"""
    This module implements Gaines stochastic multiplication, supporting unipolar/bipolar formats.
"""
def __init__(self, mode='bipolar', stype=torch.float):
super(GainesMul, self).__init__()
self.mode = mode
self.stype = stype
def UnaryMul_forward(self, input_0, input_1):
if self.mode == 'unipolar':
return input_0.type(torch.int8) & input_1.type(torch.int8)
elif self.mode == 'bipolar':
return 1 - (input_0.type(torch.int8) ^ input_1.type(torch.int8))
else:
raise ValueError('UnaryMul mode is not implemented.')
def forward(self, input_0, input_1):
return self.UnaryMul_forward(input_0, input_1).type(self.stype)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_bitwise_xor_rsub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tmp0.to(tl.int8)
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp1 ^ tmp3
tmp5 = tl.full([1], 1, tl.int8)
tmp6 = tmp5 - tmp4
tmp7 = tmp6.to(tl.float32)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_bitwise_xor_rsub_0[grid(256)](arg0_1,
arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class GainesMulNew(torch.nn.Module):
"""
    This module implements Gaines stochastic multiplication, supporting unipolar/bipolar formats.
"""
def __init__(self, mode='bipolar', stype=torch.float):
super(GainesMulNew, self).__init__()
self.mode = mode
self.stype = stype
def UnaryMul_forward(self, input_0, input_1):
if self.mode == 'unipolar':
return input_0.type(torch.int8) & input_1.type(torch.int8)
elif self.mode == 'bipolar':
return 1 - (input_0.type(torch.int8) ^ input_1.type(torch.int8))
else:
raise ValueError('UnaryMul mode is not implemented.')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| libingzheren/Stochastic_Computing | GainesMul | false | 7,080 | [
"MIT"
] | 1 | c02461454618e9ce0c86ce695fad9e95d1ca5e00 | https://github.com/libingzheren/Stochastic_Computing/tree/c02461454618e9ce0c86ce695fad9e95d1ca5e00 | import torch
class Model(torch.nn.Module):
"""
    This module implements Gaines stochastic multiplication, supporting unipolar/bipolar formats.
"""
def __init__(self, mode='bipolar', stype=torch.float):
super().__init__()
self.mode = mode
self.stype = stype
def UnaryMul_forward(self, input_0, input_1):
if self.mode == 'unipolar':
return input_0.type(torch.int8) & input_1.type(torch.int8)
elif self.mode == 'bipolar':
return 1 - (input_0.type(torch.int8) ^ input_1.type(torch.int8))
else:
raise ValueError('UnaryMul mode is not implemented.')
def forward(self, input_0, input_1):
return self.UnaryMul_forward(input_0, input_1).type(self.stype)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
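# Note: in bipolar stochastic computing a 0/1 bitstream with ones-probability p
# represents the value 2p - 1, and the XNOR above, out = 1 - (a ^ b),
# multiplies the represented values of two independent streams:
# 2*(pa*pb + (1-pa)*(1-pb)) - 1 = (2*pa - 1) * (2*pb - 1). A minimal
# Monte-Carlo sketch (assumes independent Bernoulli streams; the stream length
# and 0.02 tolerance are illustrative choices, not from the record):
import torch
n = 1_000_000
va, vb = 0.5, -0.25                                 # target bipolar values
a = (torch.rand(n) < (va + 1) / 2).to(torch.int8)   # P(bit = 1) = (v + 1) / 2
b = (torch.rand(n) < (vb + 1) / 2).to(torch.int8)
out = 1 - (a ^ b)                                   # XNOR, as in GainesMul's bipolar path
v_out = 2 * out.float().mean().item() - 1
assert abs(v_out - va * vb) < 0.02                  # expected product: -0.125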
Encoder3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pd/cpdu37l3bj63bjibgjk2ueagf7o3e26iukuvw6axiaa2bjb2e6op.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qt/cqtr4n2m6sbec7uajs5ro6uxx6le2tnyfdyxxbml57pxg4d7i4vr.py
# Topologically Sorted Source Nodes: [truediv, std], Original ATen: [aten.div, aten.exp]
# Source node to ATen node mapping:
# std => exp
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_1, 2), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
triton_poi_fused_div_exp_2 = async_compile.triton('triton_poi_fused_div_exp_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_exp_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 512, grid=grid(512), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, std], Original ATen: [aten.div, aten.exp]
triton_poi_fused_div_exp_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf4, (64, 4), (8, 1), 0), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Encoder3(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H * 2)
self.enc2 = nn.Linear(in_features=self.H * 2, out_features=self.H)
self.enc3 = nn.Linear(in_features=self.H, out_features=self.M * 2)
def forward(self, x):
x = self.enc1(x)
x = nn.functional.relu(x)
x = self.enc2(x)
x = nn.functional.relu(x)
x = self.enc3(x)
x = x.view(-1, 2, self.M)
mu = x[:, 0, :]
log_var = x[:, 1, :]
std = torch.exp(log_var / 2)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D': 4, 'H': 4, 'M': 4}]
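# Hedged usage sketch (added for illustration; not part of the original record).
# Encoder3 produces (mu, std) per latent dimension, the usual inputs to the
# VAE reparameterization step z = mu + std * eps; the name example_usage is
# hypothetical.
def example_usage():
    enc = Encoder3(D=4, H=4, M=4)
    mu, std = enc(torch.rand(4, 4, 4, 4))
    z = mu + std * torch.randn_like(std)  # reparameterization trick
    return z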
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_div_exp_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1,
primals_2, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_div_exp_2[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf4, (64, 4), (8, 1), 0
), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class Encoder3New(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H * 2)
self.enc2 = nn.Linear(in_features=self.H * 2, out_features=self.H)
self.enc3 = nn.Linear(in_features=self.H, out_features=self.M * 2)
def forward(self, input_0):
primals_1 = self.enc1.weight
primals_2 = self.enc1.bias
primals_4 = self.enc2.weight
primals_5 = self.enc2.bias
primals_6 = self.enc3.weight
primals_7 = self.enc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
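# Note: Encoder3New keeps the original nn.Linear parameters but delegates the
# arithmetic to the compiled call() above; output[0] is mu and output[1] is std.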
| le0x99/deep-generative-modeling | Encoder3 | false | 7,081 | [
"MIT"
] | 1 | 40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | https://github.com/le0x99/deep-generative-modeling/tree/40ffd1640dc3e5a6a2b4ba16a1d767034f081475 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, D, H, M):
super().__init__()
self.D = D
self.M = M
self.H = H
self.enc1 = nn.Linear(in_features=self.D, out_features=self.H * 2)
self.enc2 = nn.Linear(in_features=self.H * 2, out_features=self.H)
self.enc3 = nn.Linear(in_features=self.H, out_features=self.M * 2)
def forward(self, x):
x = self.enc1(x)
x = nn.functional.relu(x)
x = self.enc2(x)
x = nn.functional.relu(x)
x = self.enc3(x)
x = x.view(-1, 2, self.M)
mu = x[:, 0, :]
log_var = x[:, 1, :]
std = torch.exp(log_var / 2)
return mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ProtectedMultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
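# With embed_dim=4 and num_heads=4 the head_dim is 1, so the attention scale
# head_dim ** -0.5 equals 1.0 and the kernel below reduces to a bias add
# followed by a multiply by 1.0.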
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
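# This fragment is the numerically stable softmax over the last dimension:
# softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)); the persistent
# reduction below evaluates it row-wise (rnumel = 16 keys per query row).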
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qa/cqazar4hg4rdjbxm7zr5mix2w3dkhfmvvjksn7c6lktr5yfe6ndy.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
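# The clone materializes attn.transpose(0, 1) in contiguous memory so the
# out_proj addmm that follows can treat it as a plain (tgt_len * bsz,
# embed_dim) matrix.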
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c2/cc2wsialcqiknwetscnqy3fzaqmmib3cxfb7tsfjx7hdlsxbdq7s.py
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# attn_weights_4 => div_1
# sum_1 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_17, [1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
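# Averages the attention weights over the num_heads=4 dimension
# (attn_weights.sum(dim=1) / num_heads), realized below as the sum of the four
# head slices times 0.25.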
triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf8, buf9, 4, 16, grid=grid(4, 16), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_3.run(buf7, buf11, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
from fairseq import utils  # assumed import: _get_input_buffer/_set_input_buffer below call utils.get_incremental_state/set_incremental_state
class ProtectedMultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
incremental_state=None, need_weights=True, static_kv=False,
attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
if static_kv:
assert kv_same and not qkv_same
key = value = None
else:
saved_state = None
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1)
], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if saved_state is not None:
if 'prev_key' in saved_state:
prev_key = saved_state['prev_key'].view(bsz * self.
num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
k = torch.cat((prev_key, k), dim=1)
if 'prev_value' in saved_state:
prev_value = saved_state['prev_value'].view(bsz * self.
num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
v = torch.cat((prev_value, v), dim=1)
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.
head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1,
self.head_dim)
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask, torch.zeros
(key_padding_mask.size(0), 1).type_as(key_padding_mask)
], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
if self.onnx_trace:
attn_weights = torch.where(key_padding_mask.unsqueeze(1).
unsqueeze(2), torch.Tensor([float('-Inf')]),
attn_weights.float()).type_as(attn_weights)
else:
attn_weights = attn_weights.float().masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')
).type_as(attn_weights)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
all_inf = torch.isinf(attn_weights).all(dim=-1)
if all_inf.any():
attn_weights = attn_weights.float().masked_fill(all_inf.
unsqueeze(-1), 0).type_as(attn_weights)
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=
self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz,
embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
else:
attn_weights = None
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state,
'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
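# Hedged usage sketch (added for illustration; not part of the original record).
# Self-attention: pass the same (tgt_len, batch, embed_dim) tensor as query,
# key and value; the module returns (attn, head-averaged attn_weights). The
# name example_usage is hypothetical.
def example_usage():
    mha = ProtectedMultiheadAttention(embed_dim=4, num_heads=4)
    x = torch.rand(4, 4, 4)  # (tgt_len, bsz, embed_dim)
    attn, attn_weights = mha(x, x, x)
    return attn, attn_weights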
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
from fairseq import utils  # assumed import: _get_input_buffer/_set_input_buffer below call utils.get_incremental_state/set_incremental_state
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class ProtectedMultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state,
'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| laiguokun/fairseq | ProtectedMultiheadAttention | false | 7,082 | [
"MIT"
] | 1 | 6c01c91aac81eb2e3173add4463dfa45c404ffa5 | https://github.com/laiguokun/fairseq/tree/6c01c91aac81eb2e3173add4463dfa45c404ffa5 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
from fairseq import utils  # assumed import: the incremental-state helpers called below live in fairseq.utils
class Model(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
incremental_state=None, need_weights=True, static_kv=False,
attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
if static_kv:
assert kv_same and not qkv_same
key = value = None
else:
saved_state = None
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is n
# ... truncated (>4000 chars) for memory efficiency |
CustomGruCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3o/c3o3rsqvwtwazhhofhyvvpsx3gxpac2vdmskpikbppasjhmmc5gs.py
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# hy => add_3
# inputgate => sigmoid_1
# mul => mul
# mul_1 => mul_1
# newgate => tanh
# resetgate => sigmoid
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_11), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %mul), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_6, %tanh), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, %mul_1), kwargs = {})
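# The fragment above fuses the full GRU cell update (biases folded into the
# adds):
#   r = sigmoid(i_r + h_r),  z = sigmoid(i_z + h_z)
#   n = tanh(i_n + r * h_n),  h' = n + z * (h - n)  (= (1 - z) * n + z * h)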
triton_poi_fused_add_mul_sigmoid_sub_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sub_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sub_tanh_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr1 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), xmask)
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr6 + (x2), xmask)
tmp17 = tl.load(in_ptr7 + (x2), xmask)
tmp21 = tl.load(in_ptr8 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp7 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp22 = tmp21 - tmp20
tmp23 = tmp15 * tmp22
tmp24 = tmp20 + tmp23
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
tl.store(in_out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3)
del primals_9
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_n], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_11
del primals_12
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_13
del primals_14
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0.run(buf6, buf7, primals_2, buf1, primals_5, primals_8, buf3, primals_10, buf4, buf5, primals_6, buf8, 256, grid=grid(256), stream=stream0)
del buf1
del buf3
del primals_10
del primals_2
del primals_5
del primals_8
return (buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, buf5, buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class CustomGruCell(nn.Module):
"""
    A forward-only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(CustomGruCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, h):
i_r = self.fc_ir(x)
h_r = self.fc_hr(h)
i_z = self.fc_iz(x)
h_z = self.fc_hz(h)
i_n = self.fc_in(x)
h_n = self.fc_hn(h)
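        # Standard GRU gating (a note on the math below): the final update
        # hy = newgate + inputgate * (h - newgate) is the usual convex
        # combination (1 - inputgate) * newgate + inputgate * h, so
        # inputgate plays the role of the update gate and resetgate scales
        # the hidden-to-candidate contribution h_n.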
resetgate = (i_r + h_r).sigmoid()
inputgate = (i_z + h_z).sigmoid()
newgate = (i_n + resetgate * h_n).tanh()
hy = newgate + inputgate * (h - newgate)
return hy
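# Minimal usage sketch (shapes are illustrative, matching get_inputs below):
# cell = CustomGruCell(input_size=4, hidden_size=4)
# h_next = cell(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))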
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, xmask)
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr6 + x2, xmask)
tmp17 = tl.load(in_ptr7 + x2, xmask)
tmp21 = tl.load(in_ptr8 + x2, xmask)
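    # Fused GRU gating epilogue: the six Linear maps ran as matmuls in call();
    # here the remaining bias adds, sigmoids, tanh and the final interpolation
    # happen in one elementwise pass. tmp7 = resetgate, tmp15 = inputgate,
    # tmp20 = newgate, tmp24 = hy = newgate + inputgate * (h - newgate).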
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp7 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp22 = tmp21 - tmp20
tmp23 = tmp15 * tmp22
tmp24 = tmp20 + tmp23
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(in_out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr0 + x2, tmp24, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3)
del primals_9
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf4)
del primals_11
del primals_12
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf5)
del primals_13
del primals_14
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(256)](buf6, buf7,
primals_2, buf1, primals_5, primals_8, buf3, primals_10, buf4,
buf5, primals_6, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf3
del primals_10
del primals_2
del primals_5
del primals_8
return buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf4, buf5, buf6, buf7
class CustomGruCellNew(nn.Module):
"""
    A forward-only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(CustomGruCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, input_0, input_1):
primals_1 = self.fc_ir.weight
primals_2 = self.fc_ir.bias
primals_4 = self.fc_hr.weight
primals_5 = self.fc_hr.bias
primals_7 = self.fc_iz.weight
primals_8 = self.fc_iz.bias
primals_9 = self.fc_hz.weight
primals_10 = self.fc_hz.bias
primals_11 = self.fc_in.weight
primals_12 = self.fc_in.bias
primals_13 = self.fc_hn.weight
primals_14 = self.fc_hn.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
| li4112/PySyft | CustomGruCell | false | 7,083 | [
"Apache-2.0"
] | 1 | e593cad25d6831623e6a2b6d34bcb04adcbe00f9 | https://github.com/li4112/PySyft/tree/e593cad25d6831623e6a2b6d34bcb04adcbe00f9 | import torch
import numpy as np
import torch.nn as nn
class Model(nn.Module):
"""
    A forward-only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, h):
i_r = self.fc_ir(x)
h_r = self.fc_hr(h)
i_z = self.fc_iz(x)
h_z = self.fc_hz(h)
i_n = self.fc_in(x)
h_n = self.fc_hn(h)
resetgate = (i_r + h_r).sigmoid()
inputgate = (i_z + h_z).sigmoid()
newgate = (i_n + resetgate * h_n).tanh()
hy = newgate + inputgate * (h - newgate)
return hy
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MNIST_FC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cf/ccfgxezxafzrchfoupettzwuwyvp63xojv5p7oaeoavhmxd3nxhh.py
# Topologically Sorted Source Nodes: [xb_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# xb_1 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sh/csh4366dsc3zowcgmgajx7xaw7zluzalm55rlsqa5kisnk3pzpkp.py
# Topologically Sorted Source Nodes: [xb_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# xb_2 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (10*x0)), tmp11, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (32, 784), (784, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (10, 32), (32, 1))
assert_size_stride(primals_5, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 32), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [xb_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 128, grid=grid(128), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [xb_2], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf2, buf5, 4, 10, grid=grid(4), stream=stream0)
del buf2
return (buf5, primals_1, buf1, buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MNIST_FC(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(28 * 28, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, xb):
xb = xb.view(-1, 28 * 28)
xb = F.relu(self.fc1(xb))
        xb = F.softmax(self.fc2(xb), dim=1)
return xb.view(-1, 10)
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
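    # Numerically stable row softmax: subtract the row max before exp so the
    # largest exponent is 0, then normalize by the row sum (rows masked to
    # rnumel=10 valid elements).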
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (32, 784), (784, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (10, 32), (32, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 32
), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(128)](buf1, primals_3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(32, 10), (1, 32), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK=1,
num_warps=2, num_stages=1)
del buf2
return buf5, primals_1, buf1, buf5, primals_4
class MNIST_FCNew(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(28 * 28, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| lihebi/AdvAE | MNIST_FC | false | 7,084 | [
"MIT"
] | 1 | 56dea2a33c7da64bcc577b0c061a38406fdde101 | https://github.com/lihebi/AdvAE/tree/56dea2a33c7da64bcc577b0c061a38406fdde101 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(28 * 28, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, xb):
xb = xb.view(-1, 28 * 28)
xb = F.relu(self.fc1(xb))
        xb = F.softmax(self.fc2(xb), dim=1)
return xb.view(-1, 10)
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return []
|
SpatialAttentionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/46/c46mg7rvdztu6n5oosf5c4if7ziag6obrxhwbn43lcdfibfuom7w.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %getitem], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py
# Topologically Sorted Source Nodes: [conv2d, out_1], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d => convolution
# out_1 => sigmoid
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d, out_1], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class SpatialAttentionModule(nn.Module):
def __init__(self):
super(SpatialAttentionModule, self).__init__()
        self.conv2d = nn.Conv2d(in_channels=2, out_channels=1,
            kernel_size=7, stride=1, padding=3)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = torch.mean(x, dim=1, keepdim=True)
maxout, _ = torch.max(x, dim=1, keepdim=True)
out = torch.cat([avgout, maxout], dim=1)
out = self.sigmoid(self.conv2d(out))
return out
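# Minimal usage sketch (shapes are illustrative, matching get_inputs below):
# the module maps an (N, C, H, W) feature map to an (N, 1, H, W) attention
# mask in (0, 1), typically multiplied back onto the input features.
# sam = SpatialAttentionModule()
# mask = sam(torch.rand(4, 4, 4, 4))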
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
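    # Fused torch.mean/torch.max over the channel dim plus torch.cat: output
    # channel x1 == 0 holds the mean of the 4 input channels (sum / 4.0),
    # x1 == 1 holds their running maximum.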
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class SpatialAttentionModuleNew(nn.Module):
def __init__(self):
super(SpatialAttentionModuleNew, self).__init__()
        self.conv2d = nn.Conv2d(in_channels=2, out_channels=1,
            kernel_size=7, stride=1, padding=3)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv2d.weight
primals_3 = self.conv2d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lidawei0124/ISD_yolo_dual | SpatialAttentionModule | false | 7,085 | [
"Apache-2.0"
] | 1 | a4617a6ad20b3988f3b422df7a1b8533e32e241b | https://github.com/lidawei0124/ISD_yolo_dual/tree/a4617a6ad20b3988f3b422df7a1b8533e32e241b | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
        self.conv2d = nn.Conv2d(in_channels=2, out_channels=1,
            kernel_size=7, stride=1, padding=3)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = torch.mean(x, dim=1, keepdim=True)
maxout, _ = torch.max(x, dim=1, keepdim=True)
out = torch.cat([avgout, maxout], dim=1)
out = self.sigmoid(self.conv2d(out))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/w3/cw3egt7ajdde7mbqzrdxs4mdcaxj75b4l3brz5gbsf4yd73gbids.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (512, 784), (784, 1))
assert_size_stride(primals_3, (512, ), (1, ))
assert_size_stride(primals_4, (512, 512), (512, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (10, 512), (512, 1))
assert_size_stride(primals_7, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 512), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 2048, grid=grid(2048), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 2048, grid=grid(2048), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (512, 10), (1, 512), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (buf4, primals_1, buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 10)
        self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
x = self.fc3(x)
return x
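# Note: the compiled call() above contains no dropout kernels, which is
# consistent with tracing in eval mode, where nn.Dropout is the identity;
# only the three Linear layers (two with a fused ReLU epilogue) remain.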
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
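    # Fused bias-add + ReLU epilogue applied in place to the matmul output:
    # out = max(0, x + bias), with the 512-wide bias broadcast via x0.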
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (512, 784), (784, 1))
assert_size_stride(primals_3, (512,), (1,))
assert_size_stride(primals_4, (512, 512), (512, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (10, 512), (512, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
512), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 512), (
1, 512), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(2048)](buf3, primals_5, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(512, 10), (1, 512), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.fc1 = nn.Linear(28 * 28, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 10)
        self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| liguodongIOT/nlp-app-samples | Net | false | 7,086 | [
"Apache-2.0"
] | 1 | e0cc747e88c7b5c701b5099462d2dd6277c23381 | https://github.com/liguodongIOT/nlp-app-samples/tree/e0cc747e88c7b5c701b5099462d2dd6277c23381 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(28 * 28, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 10)
        self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return []
|
Attention_ElementWiseProduct | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ti/ctighfmvx3fhiofuszhqc22eqpq4yxfe5zm4aqfvagwdj5pvefdz.py
# Topologically Sorted Source Nodes: [embed_input], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# embed_input => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %expand, %sub, %mul], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = (xindex // 16)
x1 = (xindex // 16) % 4
x4 = xindex
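    # Builds the element-wise-product attention input along the last dim in
    # one pass: four 4-wide segments [behavior, query, behavior - query,
    # behavior * query], matching cat([primals_2, expand, sub, mul], dim=2);
    # the (4, 4) query is broadcast along the leading dim (indexed only by x1).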
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + ((4*x3) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tmp21 = tl.full([1], 16, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr0 + ((4*x3) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr1 + ((4*x1) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + (x4), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/u7/cu7fu4tmqt3iqywrz3xktm46fuototooejk66dgew3xgnbgrf7x3.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._prelu_kernel]
# Source node to ATen node mapping:
# output => gt, mul_1, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {})
triton_poi_fused__prelu_kernel_1 = async_compile.triton('triton_poi_fused__prelu_kernel_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
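    # PReLU with a single shared learnable slope: out = x if x > 0 else slope * x.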
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mt/cmtharafrhduy5nfmsodmdnhoi4x33a2coi3gv6xt2cp6rjqa7ff.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# output_1 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_4,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
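# A minimal eager-mode sketch of the fusion above: the kernel adds the scalar
# linear2 bias in place and then applies the sigmoid, i.e. sigmoid(x + b).
# The helper name is illustrative only.
def _bias_sigmoid_reference_sketch(x, bias):
    import torch
    return torch.sigmoid(x + bias)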
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (32, 16), (16, 1))
assert_size_stride(primals_4, (32, ), (1, ))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (1, 32), (32, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [embed_input], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 32), (1, 16), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._prelu_kernel]
triton_poi_fused__prelu_kernel_1.run(buf1, primals_5, buf2, 512, grid=grid(512), stream=stream0)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 1), (1, 32), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf4, primals_7, 16, grid=grid(16), stream=stream0)
del primals_7
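    # buf4 holds the sigmoid output; the remaining entries keep weights and
    # intermediates alive (e.g. values saved for a backward pass).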
return (buf4, primals_5, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), buf1, reinterpret_tensor(buf2, (16, 32), (32, 1), 0), buf4, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention_ElementWiseProduct(nn.Module):
"""
Input:
behavior: 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
candidate: 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output:
attention_weight: 3D tensor with shape: ``(batch_size, field_size, 1)``.
"""
def __init__(self, embedding_size):
super().__init__()
self.linear1 = nn.Linear(4 * embedding_size, 32)
self.linear2 = nn.Linear(32, 1)
self.prelu = nn.PReLU()
def forward(self, behavior, candidate):
candidate = candidate.expand_as(behavior)
embed_input = torch.cat([behavior, candidate, behavior - candidate,
behavior * candidate], dim=2)
output = self.prelu(self.linear1(embed_input))
output = F.sigmoid(self.linear2(output))
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embedding_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x1 = xindex // 16 % 4
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (4 * x3 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp23 = tl.load(in_ptr0 + (4 * x3 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x4, tmp30, xmask)
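# A minimal eager-mode sketch of triton_poi_fused_cat_0 above (illustrative
# helper name): the kernel materializes the four 4-wide segments of the
# concatenation in one pass, selecting the source by the offset x0 inside the
# 16-wide output dimension. The expand_as broadcast mirrors the source model.
def _fused_cat_reference_sketch(behavior, candidate):
    import torch
    candidate = candidate.expand_as(behavior)
    return torch.cat([behavior, candidate, behavior - candidate,
        behavior * candidate], dim=2)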
@triton.jit
def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (32, 16), (16, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1, 32), (32, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_3, (16, 32), (1, 16), 0
), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.float32)
triton_poi_fused__prelu_kernel_1[grid(512)](buf1, primals_5, buf2,
512, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 32), (32, 1), 0),
reinterpret_tensor(primals_6, (32, 1), (1, 32), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0)
del buf3
triton_poi_fused_sigmoid_2[grid(16)](buf4, primals_7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_7
    return (buf4, primals_5, reinterpret_tensor(buf0, (16, 16), (16, 1), 0),
        buf1, reinterpret_tensor(buf2, (16, 32), (32, 1), 0), buf4, primals_6)
class Attention_ElementWiseProductNew(nn.Module):
"""
Input:
behavior: 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
candidate: 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output:
attention_weight: 3D tensor with shape: ``(batch_size, field_size, 1)``.
"""
def __init__(self, embedding_size):
super().__init__()
self.linear1 = nn.Linear(4 * embedding_size, 32)
self.linear2 = nn.Linear(32, 1)
self.prelu = nn.PReLU()
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_6 = self.linear2.weight
primals_5 = self.linear2.bias
primals_7 = self.prelu.weight
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
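# A hedged usage sketch for the wrapper class above; shapes follow
# get_inputs(), and a CUDA device is required because call() pins device 0.
# The helper name is illustrative and not part of the generated module.
def _run_attention_sketch():
    model = Attention_ElementWiseProductNew(embedding_size=4).cuda()
    behavior = torch.rand(4, 4, 4, device='cuda')
    candidate = torch.rand(4, 4, device='cuda')
    # Returns attention weights of shape (batch_size, field_size, 1).
    return model(behavior, candidate)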
| liangzhang-lz/SparrowRecSys | Attention_ElementWiseProduct | false | 7,087 | ["Apache-2.0"] | 1 | 9fe1a27d3903117e6e2b5487c0689c0bd9281473 | https://github.com/liangzhang-lz/SparrowRecSys/tree/9fe1a27d3903117e6e2b5487c0689c0bd9281473 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Input:
behavior: 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
candidate: 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output:
attention_weight: 3D tensor with shape: ``(batch_size, field_size, 1)``.
"""
def __init__(self, embedding_size):
super().__init__()
self.linear1 = nn.Linear(4 * embedding_size, 32)
self.linear2 = nn.Linear(32, 1)
self.prelu = nn.PReLU()
def forward(self, behavior, candidate):
candidate = candidate.expand_as(behavior)
embed_input = torch.cat([behavior, candidate, behavior - candidate,
behavior * candidate], dim=2)
output = self.prelu(self.linear1(embed_input))
output = F.sigmoid(self.linear2(output))
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
AlexNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3j/c3jcyamwgi62wqn3zu3j6pebpmcimdfgshkghv6sxxavfuohbroi.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1476096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 96
x0 = xindex % 3844
x4 = (xindex // 3844)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask)
''', device_str='cuda')
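# A minimal eager-mode sketch of the fusion above (illustrative helper name):
# the convolution itself runs through extern_kernels.convolution, and this
# kernel only adds the per-channel bias and applies ReLU, i.e.
# relu(conv_out + b).
def _bias_relu_reference_sketch(conv_out, bias):
    import torch
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))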
# kernel path: runs/run_shard_4/inductor_cache/za/cza3ey7hkyy5xmmvxqevc4t7mmbqlzbllriupvfag6s2k5pa56nv.py
# Topologically Sorted Source Nodes: [x_2, div], Original ATen: [aten.max_pool2d_with_indices, aten.pow]
# Source node to ATen node mapping:
# div => pow_1
# x_2 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%getitem, 2), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_pow_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_pow_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 345600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = (xindex // 30) % 30
x2 = (xindex // 900)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (124 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (125 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (126 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tmp16 * tmp16
tl.store(out_ptr0 + (x3), tmp16, xmask)
tl.store(out_ptr1 + (x3), tmp41, xmask)
tl.store(out_ptr2 + (x3), tmp42, xmask)
''', device_str='cuda')
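# A minimal eager-mode sketch of the kernel above (illustrative helper name):
# a 3x3, stride-2 max pool whose argmax is also recorded (here as a flat
# index, whereas the kernel stores the int8 offset within the 3x3 window),
# plus the squared pooled values consumed by the LRN average pool below.
def _maxpool_square_reference_sketch(x):
    import torch.nn.functional as F
    pooled, idx = F.max_pool2d(x, kernel_size=3, stride=2, return_indices=True)
    return pooled, idx, pooled * pooled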
# kernel path: runs/run_shard_4/inductor_cache/2b/c2bzrikzlv5fjqzv7srcovmz5lxmof4cinxfk7txkso46wlnfibs.py
# Topologically Sorted Source Nodes: [div_1, mul, add, div_2, x_3], Original ATen: [aten.avg_pool2d, aten.mul, aten.add, aten.pow, aten.div]
# Source node to ATen node mapping:
# add => add
# div_1 => avg_pool2d
# div_2 => pow_2
# mul => mul
# x_3 => div
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [5, 5], [1, 1], [2, 2]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 0.0001), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.0), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%getitem, %pow_2), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%getitem, 1.0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_8, 2.0), kwargs = {})
triton_poi_fused_add_avg_pool2d_div_mul_pow_2 = async_compile.triton('triton_poi_fused_add_avg_pool2d_div_mul_pow_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_div_mul_pow_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 26, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 345600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 30) % 30
x0 = xindex % 30
x3 = xindex
tmp118 = tl.load(in_ptr1 + (x3), xmask)
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 30, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-2) + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-62) + x3), tmp10 & xmask, other=0.0)
tmp12 = (-1) + x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-61) + x3), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-60) + x3), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = 1 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + ((-59) + x3), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = 2 + x0
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp5 & tmp36
tmp38 = tl.load(in_ptr0 + ((-58) + x3), tmp37 & xmask, other=0.0)
tmp39 = tmp38 + tmp32
tmp40 = (-1) + x1
tmp41 = tmp40 >= tmp1
tmp42 = tmp40 < tmp3
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp9
tmp45 = tl.load(in_ptr0 + ((-32) + x3), tmp44 & xmask, other=0.0)
tmp46 = tmp45 + tmp39
tmp47 = tmp43 & tmp15
tmp48 = tl.load(in_ptr0 + ((-31) + x3), tmp47 & xmask, other=0.0)
tmp49 = tmp48 + tmp46
tmp50 = tmp43 & tmp22
tmp51 = tl.load(in_ptr0 + ((-30) + x3), tmp50 & xmask, other=0.0)
tmp52 = tmp51 + tmp49
tmp53 = tmp43 & tmp29
tmp54 = tl.load(in_ptr0 + ((-29) + x3), tmp53 & xmask, other=0.0)
tmp55 = tmp54 + tmp52
tmp56 = tmp43 & tmp36
tmp57 = tl.load(in_ptr0 + ((-28) + x3), tmp56 & xmask, other=0.0)
tmp58 = tmp57 + tmp55
tmp59 = x1
tmp60 = tmp59 >= tmp1
tmp61 = tmp59 < tmp3
tmp62 = tmp60 & tmp61
tmp63 = tmp62 & tmp9
tmp64 = tl.load(in_ptr0 + ((-2) + x3), tmp63 & xmask, other=0.0)
tmp65 = tmp64 + tmp58
tmp66 = tmp62 & tmp15
tmp67 = tl.load(in_ptr0 + ((-1) + x3), tmp66 & xmask, other=0.0)
tmp68 = tmp67 + tmp65
tmp69 = tmp62 & tmp22
tmp70 = tl.load(in_ptr0 + (x3), tmp69 & xmask, other=0.0)
tmp71 = tmp70 + tmp68
tmp72 = tmp62 & tmp29
tmp73 = tl.load(in_ptr0 + (1 + x3), tmp72 & xmask, other=0.0)
tmp74 = tmp73 + tmp71
tmp75 = tmp62 & tmp36
tmp76 = tl.load(in_ptr0 + (2 + x3), tmp75 & xmask, other=0.0)
tmp77 = tmp76 + tmp74
tmp78 = 1 + x1
tmp79 = tmp78 >= tmp1
tmp80 = tmp78 < tmp3
tmp81 = tmp79 & tmp80
tmp82 = tmp81 & tmp9
tmp83 = tl.load(in_ptr0 + (28 + x3), tmp82 & xmask, other=0.0)
tmp84 = tmp83 + tmp77
tmp85 = tmp81 & tmp15
tmp86 = tl.load(in_ptr0 + (29 + x3), tmp85 & xmask, other=0.0)
tmp87 = tmp86 + tmp84
tmp88 = tmp81 & tmp22
tmp89 = tl.load(in_ptr0 + (30 + x3), tmp88 & xmask, other=0.0)
tmp90 = tmp89 + tmp87
tmp91 = tmp81 & tmp29
tmp92 = tl.load(in_ptr0 + (31 + x3), tmp91 & xmask, other=0.0)
tmp93 = tmp92 + tmp90
tmp94 = tmp81 & tmp36
tmp95 = tl.load(in_ptr0 + (32 + x3), tmp94 & xmask, other=0.0)
tmp96 = tmp95 + tmp93
tmp97 = 2 + x1
tmp98 = tmp97 >= tmp1
tmp99 = tmp97 < tmp3
tmp100 = tmp98 & tmp99
tmp101 = tmp100 & tmp9
tmp102 = tl.load(in_ptr0 + (58 + x3), tmp101 & xmask, other=0.0)
tmp103 = tmp102 + tmp96
tmp104 = tmp100 & tmp15
tmp105 = tl.load(in_ptr0 + (59 + x3), tmp104 & xmask, other=0.0)
tmp106 = tmp105 + tmp103
tmp107 = tmp100 & tmp22
tmp108 = tl.load(in_ptr0 + (60 + x3), tmp107 & xmask, other=0.0)
tmp109 = tmp108 + tmp106
tmp110 = tmp100 & tmp29
tmp111 = tl.load(in_ptr0 + (61 + x3), tmp110 & xmask, other=0.0)
tmp112 = tmp111 + tmp109
tmp113 = tmp100 & tmp36
tmp114 = tl.load(in_ptr0 + (62 + x3), tmp113 & xmask, other=0.0)
tmp115 = tmp114 + tmp112
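    # tmp116 computes the averaging divisor for the 5x5 window at output
    # position (x0, x1); tmp117 is then the avg_pool2d of the squared values.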
tmp116 = 4 + ((-2)*x0) + ((-2)*x1) + (2*((32) * ((32) <= (3 + x0)) + (3 + x0) * ((3 + x0) < (32)))) + (2*((32) * ((32) <= (3 + x1)) + (3 + x1) * ((3 + x1) < (32)))) + (x0*x1) + (((32) * ((32) <= (3 + x0)) + (3 + x0) * ((3 + x0) < (32)))*((32) * ((32) <= (3 + x1)) + (3 + x1) * ((3 + x1) < (32)))) + ((-1)*x0*((32) * ((32) <= (3 + x1)) + (3 + x1) * ((3 + x1) < (32)))) + ((-1)*x1*((32) * ((32) <= (3 + x0)) + (3 + x0) * ((3 + x0) < (32))))
tmp117 = tmp115 / tmp116
tmp119 = 0.0001
tmp120 = tmp117 * tmp119
tmp121 = 1.0
tmp122 = tmp120 + tmp121
tmp123 = 0.75
tmp124 = libdevice.pow(tmp122, tmp123)
tmp125 = tmp118 / tmp124
tmp126 = 2.0
tmp127 = tmp118 * tmp126
tl.store(out_ptr0 + (x3), tmp117, xmask)
tl.store(out_ptr1 + (x3), tmp125, xmask)
tl.store(out_ptr2 + (x3), tmp127, xmask)
''', device_str='cuda')
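# A minimal eager-mode sketch of the within-channel response normalization
# realized by the two kernels above (illustrative helper; the constants
# 0.0001 and 0.75 and the 5x5 window are read from the graph fragments):
def _lrn_reference_sketch(x):
    import torch.nn.functional as F
    den = (1.0 + 0.0001 * F.avg_pool2d(x * x, 5, stride=1, padding=2)) ** 0.75
    # The kernel additionally stores 2.0 * x, the pow(x, 2) derivative
    # retained for the backward pass (out_ptr2 / buf26).
    return x / den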
# kernel path: runs/run_shard_4/inductor_cache/l7/cl7cxheq7ddvcecqhdge3naydmjeqypgyqphaqcitlorl5ly5qdz.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_4 => convolution_1
# x_5 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%div, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 2), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 921600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 900) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qs/cqs2ylb7hs6zjqwwzkmhix2qsvfnqvjwwcxs53224b37jxhkmtsd.py
# Topologically Sorted Source Nodes: [x_6, div_4], Original ATen: [aten.max_pool2d_with_indices, aten.pow]
# Source node to ATen node mapping:
# div_4 => pow_3
# x_6 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %pow_3 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%getitem_2, 2), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_pow_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_pow_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_pow_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_pow_4(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 200704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 14
x1 = (xindex // 14) % 14
x2 = (xindex // 196)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (30 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (31 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (32 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (60 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (61 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (62 + (2*x0) + (60*x1) + (900*x2)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tmp16 * tmp16
tl.store(out_ptr0 + (x3), tmp16, None)
tl.store(out_ptr1 + (x3), tmp41, None)
tl.store(out_ptr2 + (x3), tmp42, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ji/cjii5mxgbzszh7ysrdisbvam2vq53cdysofvlnl3sl3f2laljcf4.py
# Topologically Sorted Source Nodes: [div_5, mul_1, add_1, div_6, x_7], Original ATen: [aten.avg_pool2d, aten.mul, aten.add, aten.pow, aten.div]
# Source node to ATen node mapping:
# add_1 => add_1
# div_5 => avg_pool2d_1
# div_6 => pow_4
# mul_1 => mul_1
# x_7 => div_1
# Graph fragment:
# %avg_pool2d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_3, [5, 5], [1, 1], [2, 2]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d_1, 0.0001), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.75), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%getitem_2, %pow_4), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%getitem_2, 1.0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_6, 2.0), kwargs = {})
triton_poi_fused_add_avg_pool2d_div_mul_pow_5 = async_compile.triton('triton_poi_fused_add_avg_pool2d_div_mul_pow_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_div_mul_pow_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 26, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 200704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 14) % 14
x0 = xindex % 14
x3 = xindex
tmp118 = tl.load(in_ptr1 + (x3), None)
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 14, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-2) + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-30) + x3), tmp10, other=0.0)
tmp12 = (-1) + x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-29) + x3), tmp16, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-28) + x3), tmp23, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = 1 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + ((-27) + x3), tmp30, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = 2 + x0
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp5 & tmp36
tmp38 = tl.load(in_ptr0 + ((-26) + x3), tmp37, other=0.0)
tmp39 = tmp38 + tmp32
tmp40 = (-1) + x1
tmp41 = tmp40 >= tmp1
tmp42 = tmp40 < tmp3
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp9
tmp45 = tl.load(in_ptr0 + ((-16) + x3), tmp44, other=0.0)
tmp46 = tmp45 + tmp39
tmp47 = tmp43 & tmp15
tmp48 = tl.load(in_ptr0 + ((-15) + x3), tmp47, other=0.0)
tmp49 = tmp48 + tmp46
tmp50 = tmp43 & tmp22
tmp51 = tl.load(in_ptr0 + ((-14) + x3), tmp50, other=0.0)
tmp52 = tmp51 + tmp49
tmp53 = tmp43 & tmp29
tmp54 = tl.load(in_ptr0 + ((-13) + x3), tmp53, other=0.0)
tmp55 = tmp54 + tmp52
tmp56 = tmp43 & tmp36
tmp57 = tl.load(in_ptr0 + ((-12) + x3), tmp56, other=0.0)
tmp58 = tmp57 + tmp55
tmp59 = x1
tmp60 = tmp59 >= tmp1
tmp61 = tmp59 < tmp3
tmp62 = tmp60 & tmp61
tmp63 = tmp62 & tmp9
tmp64 = tl.load(in_ptr0 + ((-2) + x3), tmp63, other=0.0)
tmp65 = tmp64 + tmp58
tmp66 = tmp62 & tmp15
tmp67 = tl.load(in_ptr0 + ((-1) + x3), tmp66, other=0.0)
tmp68 = tmp67 + tmp65
tmp69 = tmp62 & tmp22
tmp70 = tl.load(in_ptr0 + (x3), tmp69, other=0.0)
tmp71 = tmp70 + tmp68
tmp72 = tmp62 & tmp29
tmp73 = tl.load(in_ptr0 + (1 + x3), tmp72, other=0.0)
tmp74 = tmp73 + tmp71
tmp75 = tmp62 & tmp36
tmp76 = tl.load(in_ptr0 + (2 + x3), tmp75, other=0.0)
tmp77 = tmp76 + tmp74
tmp78 = 1 + x1
tmp79 = tmp78 >= tmp1
tmp80 = tmp78 < tmp3
tmp81 = tmp79 & tmp80
tmp82 = tmp81 & tmp9
tmp83 = tl.load(in_ptr0 + (12 + x3), tmp82, other=0.0)
tmp84 = tmp83 + tmp77
tmp85 = tmp81 & tmp15
tmp86 = tl.load(in_ptr0 + (13 + x3), tmp85, other=0.0)
tmp87 = tmp86 + tmp84
tmp88 = tmp81 & tmp22
tmp89 = tl.load(in_ptr0 + (14 + x3), tmp88, other=0.0)
tmp90 = tmp89 + tmp87
tmp91 = tmp81 & tmp29
tmp92 = tl.load(in_ptr0 + (15 + x3), tmp91, other=0.0)
tmp93 = tmp92 + tmp90
tmp94 = tmp81 & tmp36
tmp95 = tl.load(in_ptr0 + (16 + x3), tmp94, other=0.0)
tmp96 = tmp95 + tmp93
tmp97 = 2 + x1
tmp98 = tmp97 >= tmp1
tmp99 = tmp97 < tmp3
tmp100 = tmp98 & tmp99
tmp101 = tmp100 & tmp9
tmp102 = tl.load(in_ptr0 + (26 + x3), tmp101, other=0.0)
tmp103 = tmp102 + tmp96
tmp104 = tmp100 & tmp15
tmp105 = tl.load(in_ptr0 + (27 + x3), tmp104, other=0.0)
tmp106 = tmp105 + tmp103
tmp107 = tmp100 & tmp22
tmp108 = tl.load(in_ptr0 + (28 + x3), tmp107, other=0.0)
tmp109 = tmp108 + tmp106
tmp110 = tmp100 & tmp29
tmp111 = tl.load(in_ptr0 + (29 + x3), tmp110, other=0.0)
tmp112 = tmp111 + tmp109
tmp113 = tmp100 & tmp36
tmp114 = tl.load(in_ptr0 + (30 + x3), tmp113, other=0.0)
tmp115 = tmp114 + tmp112
tmp116 = 4 + ((-2)*x0) + ((-2)*x1) + (2*((16) * ((16) <= (3 + x0)) + (3 + x0) * ((3 + x0) < (16)))) + (2*((16) * ((16) <= (3 + x1)) + (3 + x1) * ((3 + x1) < (16)))) + (x0*x1) + (((16) * ((16) <= (3 + x0)) + (3 + x0) * ((3 + x0) < (16)))*((16) * ((16) <= (3 + x1)) + (3 + x1) * ((3 + x1) < (16)))) + ((-1)*x0*((16) * ((16) <= (3 + x1)) + (3 + x1) * ((3 + x1) < (16)))) + ((-1)*x1*((16) * ((16) <= (3 + x0)) + (3 + x0) * ((3 + x0) < (16))))
tmp117 = tmp115 / tmp116
tmp119 = 0.0001
tmp120 = tmp117 * tmp119
tmp121 = 1.0
tmp122 = tmp120 + tmp121
tmp123 = 0.75
tmp124 = libdevice.pow(tmp122, tmp123)
tmp125 = tmp118 / tmp124
tmp126 = 2.0
tmp127 = tmp118 * tmp126
tl.store(out_ptr0 + (x3), tmp117, None)
tl.store(out_ptr1 + (x3), tmp125, None)
tl.store(out_ptr2 + (x3), tmp127, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5z/c5zfzjn2aqx6dpwajwe2vfhsllvzwsgqquc6wkhy35cdnwh2tbyo.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_8 => convolution_2
# x_9 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%div_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 301056
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 196) % 384
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yn/cyng2xph3fpspqivby6n3hhn7nafozgsxmef26nxb4ghbtkve3xf.py
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_12 => convolution_4
# x_13 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 2), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 200704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 196) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dn/cdnlo5aymjge4ss5gspczlpvovvevlp4ezz34qa5gzoavy2awgks.py
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_14 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_4, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (14 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (15 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (16 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (28 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (29 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (30 + (2*x0) + (28*x1) + (196*x2)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x3), tmp16, None)
tl.store(out_ptr1 + (x3), tmp41, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (96, 3, 11, 11), (363, 121, 11, 1))
assert_size_stride(primals_2, (96, ), (1, ))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (256, 48, 5, 5), (1200, 25, 5, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (384, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (384, ), (1, ))
assert_size_stride(primals_8, (384, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_9, (384, ), (1, ))
assert_size_stride(primals_10, (256, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (4096, 9216), (9216, 1))
assert_size_stride(primals_13, (4096, ), (1, ))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096, ), (1, ))
assert_size_stride(primals_16, (1000, 4096), (4096, 1))
assert_size_stride(primals_17, (1000, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 96, 62, 62), (369024, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 96, 62, 62), (371712, 3872, 62, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 1476096, grid=grid(1476096), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1), torch.float32)
buf3 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1), torch.int8)
buf4 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, div], Original ATen: [aten.max_pool2d_with_indices, aten.pow]
triton_poi_fused_max_pool2d_with_indices_pow_1.run(buf1, buf2, buf3, buf4, 345600, grid=grid(345600), stream=stream0)
buf5 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1), torch.float32)
buf6 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1), torch.float32)
buf26 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [div_1, mul, add, div_2, x_3], Original ATen: [aten.avg_pool2d, aten.mul, aten.add, aten.pow, aten.div]
triton_poi_fused_add_avg_pool2d_div_mul_pow_2.run(buf4, buf2, buf5, buf6, buf26, 345600, grid=grid(345600), stream=stream0)
del buf2
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf7, (4, 256, 30, 30), (230400, 900, 30, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf8, primals_5, 921600, grid=grid(921600), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32)
buf10 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.int8)
buf11 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6, div_4], Original ATen: [aten.max_pool2d_with_indices, aten.pow]
triton_poi_fused_max_pool2d_with_indices_pow_4.run(buf8, buf9, buf10, buf11, 200704, grid=grid(200704), stream=stream0)
buf12 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32)
buf13 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32)
buf25 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32)
# Topologically Sorted Source Nodes: [div_5, mul_1, add_1, div_6, x_7], Original ATen: [aten.avg_pool2d, aten.mul, aten.add, aten.pow, aten.div]
triton_poi_fused_add_avg_pool2d_div_mul_pow_5.run(buf11, buf9, buf12, buf13, buf25, 200704, grid=grid(200704), stream=stream0)
del buf9
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 384, 14, 14), (75264, 196, 14, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf15, primals_7, 301056, grid=grid(301056), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf16, (4, 384, 14, 14), (75264, 196, 14, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf17, primals_9, 301056, grid=grid(301056), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf18, (4, 256, 14, 14), (50176, 196, 14, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf19, primals_11, 200704, grid=grid(200704), stream=stream0)
del primals_11
buf20 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch.float32)
buf21 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf19, buf20, buf21, 36864, grid=grid(36864), stream=stream0)
buf22 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf20, (4, 9216), (9216, 1), 0), reinterpret_tensor(primals_12, (9216, 4096), (1, 9216), 0), alpha=1, beta=1, out=buf22)
del primals_13
buf23 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf22, reinterpret_tensor(primals_14, (4096, 4096), (1, 4096), 0), alpha=1, beta=1, out=buf23)
del primals_15
buf24 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, buf23, reinterpret_tensor(primals_16, (4096, 1000), (1, 4096), 0), alpha=1, beta=1, out=buf24)
del primals_17
return (buf24, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf4, buf5, buf6, buf8, buf10, buf11, buf12, buf13, buf15, buf17, buf19, buf21, reinterpret_tensor(buf20, (4, 9216), (9216, 1), 0), buf22, buf23, primals_16, primals_14, primals_12, buf25, buf26, )
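    # Note: everything after buf24 in the tuple above appears to be kept alive for the
    # backward pass that torch.compile generates separately: conv weights, ReLU/LRN
    # activations, the max-pool index maps (buf3/buf10/buf21) and the LRN intermediates
    # buf25/buf26. Only buf24 holds the logits.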
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((96, 3, 11, 11), (363, 121, 11, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 256, 256), (196608, 65536, 256, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 48, 5, 5), (1200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((384, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((384, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4096, 9216), (9216, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4096, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1000, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((1000, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LRN(nn.Module):
"""
Local Response Normalization
"""
def __init__(self, kernel_size, alpha, beta):
super(LRN, self).__init__()
self.avg_pool = nn.AvgPool2d(kernel_size=kernel_size, stride=1,
padding=int(kernel_size / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
div = x.pow(2)
div = self.avg_pool(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
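# A minimal eager-mode restatement of the LRN above (hypothetical helper, not part of
# the original source): out = x / (1 + alpha * avg_pool(x^2)) ** beta.
def _lrn_reference(x, kernel_size=5, alpha=0.0001, beta=0.75):
    import torch.nn.functional as F
    # square, average over the local window, then normalize
    div = F.avg_pool2d(x.pow(2), kernel_size, stride=1, padding=kernel_size // 2)
    return x / div.mul(alpha).add(1.0).pow(beta)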
class AlexNet(nn.Module):
def __init__(self, classes=1000):
"""
GPU : 2
"""
super(AlexNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=96, kernel_size=
11, stride=4, padding=0, bias=True)
self.relu1 = nn.ReLU()
self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.LRN1 = LRN(kernel_size=5, alpha=0.0001, beta=0.75)
self.conv2 = nn.Conv2d(in_channels=96, out_channels=256,
kernel_size=5, stride=1, padding=2, bias=True, groups=2)
self.relu2 = nn.ReLU()
self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.LRN2 = LRN(kernel_size=5, alpha=0.0001, beta=0.75)
self.conv3 = nn.Conv2d(in_channels=256, out_channels=384,
kernel_size=3, stride=1, padding=1, bias=True)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1,
bias=True, groups=2)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1,
groups=2)
self.relu5 = nn.ReLU()
self.max_pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.dense1 = nn.Linear(6 * 6 * 256, 4096)
self.dense2 = nn.Linear(4096, 4096)
self.dense3 = nn.Linear(4096, classes)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = self.max_pool1(x)
x = self.LRN1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.max_pool2(x)
x = self.LRN2(x)
x = self.conv3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.relu4(x)
x = self.conv5(x)
x = self.relu5(x)
x = self.max_pool3(x)
x = x.view(-1, 6 * 6 * 256)
x = self.dense1(x)
x = self.dense2(x)
x = self.dense3(x)
return x
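# Shape walk-through for the 4x3x256x256 test input (these are the sizes hard-coded
# into the Triton kernels; floor division as in nn.Conv2d/nn.MaxPool2d):
#   conv1  k=11, s=4, p=0: (256 - 11) // 4 + 1 = 62 -> 4x96x62x62  (62*62 = 3844)
#   pool1  k=3,  s=2:      (62 - 3) // 2 + 1  = 30 -> 4x96x30x30  (30*30 = 900)
#   conv2  k=5,  s=1, p=2: keeps 30x30; pool2 gives (30 - 3) // 2 + 1 = 14
#   conv3/4/5 k=3, p=1:    keep 14x14;  pool3 gives (14 - 3) // 2 + 1 = 6
#   flatten: 256 * 6 * 6 = 9216 features into dense1.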
def get_inputs():
return [torch.rand([4, 3, 256, 256])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1476096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 96
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
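# The kernel above fuses the conv bias add with ReLU: per channel index x1 it computes
# max(0, conv_out + bias) and writes into a buffer whose per-channel stride is padded
# from 62*62 = 3844 to 3872 (presumably for memory alignment; 3872 is a multiple of
# 32), which is why the store offset is x0 + 3872 * x4.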
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_pow_1(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 345600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (124 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (125 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (126 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tmp16 * tmp16
tl.store(out_ptr0 + x3, tmp16, xmask)
tl.store(out_ptr1 + x3, tmp41, xmask)
tl.store(out_ptr2 + x3, tmp42, xmask)
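# The kernel above fuses three outputs per 3x3/stride-2 pooling window: the window max
# (out_ptr0), the argmax position encoded as an int8 in 0..8 for the backward pass
# (out_ptr1), and the squared max that feeds the LRN's avg_pool(x^2) term (out_ptr2).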
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 345600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 30 % 30
x0 = xindex % 30
x3 = xindex
tmp118 = tl.load(in_ptr1 + x3, xmask)
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 30, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -2 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-62 + x3), tmp10 & xmask, other=0.0)
tmp12 = -1 + x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-61 + x3), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-60 + x3), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = 1 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + (-59 + x3), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = 2 + x0
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp5 & tmp36
tmp38 = tl.load(in_ptr0 + (-58 + x3), tmp37 & xmask, other=0.0)
tmp39 = tmp38 + tmp32
tmp40 = -1 + x1
tmp41 = tmp40 >= tmp1
tmp42 = tmp40 < tmp3
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp9
tmp45 = tl.load(in_ptr0 + (-32 + x3), tmp44 & xmask, other=0.0)
tmp46 = tmp45 + tmp39
tmp47 = tmp43 & tmp15
tmp48 = tl.load(in_ptr0 + (-31 + x3), tmp47 & xmask, other=0.0)
tmp49 = tmp48 + tmp46
tmp50 = tmp43 & tmp22
tmp51 = tl.load(in_ptr0 + (-30 + x3), tmp50 & xmask, other=0.0)
tmp52 = tmp51 + tmp49
tmp53 = tmp43 & tmp29
tmp54 = tl.load(in_ptr0 + (-29 + x3), tmp53 & xmask, other=0.0)
tmp55 = tmp54 + tmp52
tmp56 = tmp43 & tmp36
tmp57 = tl.load(in_ptr0 + (-28 + x3), tmp56 & xmask, other=0.0)
tmp58 = tmp57 + tmp55
tmp59 = x1
tmp60 = tmp59 >= tmp1
tmp61 = tmp59 < tmp3
tmp62 = tmp60 & tmp61
tmp63 = tmp62 & tmp9
tmp64 = tl.load(in_ptr0 + (-2 + x3), tmp63 & xmask, other=0.0)
tmp65 = tmp64 + tmp58
tmp66 = tmp62 & tmp15
tmp67 = tl.load(in_ptr0 + (-1 + x3), tmp66 & xmask, other=0.0)
tmp68 = tmp67 + tmp65
tmp69 = tmp62 & tmp22
tmp70 = tl.load(in_ptr0 + x3, tmp69 & xmask, other=0.0)
tmp71 = tmp70 + tmp68
tmp72 = tmp62 & tmp29
tmp73 = tl.load(in_ptr0 + (1 + x3), tmp72 & xmask, other=0.0)
tmp74 = tmp73 + tmp71
tmp75 = tmp62 & tmp36
tmp76 = tl.load(in_ptr0 + (2 + x3), tmp75 & xmask, other=0.0)
tmp77 = tmp76 + tmp74
tmp78 = 1 + x1
tmp79 = tmp78 >= tmp1
tmp80 = tmp78 < tmp3
tmp81 = tmp79 & tmp80
tmp82 = tmp81 & tmp9
tmp83 = tl.load(in_ptr0 + (28 + x3), tmp82 & xmask, other=0.0)
tmp84 = tmp83 + tmp77
tmp85 = tmp81 & tmp15
tmp86 = tl.load(in_ptr0 + (29 + x3), tmp85 & xmask, other=0.0)
tmp87 = tmp86 + tmp84
tmp88 = tmp81 & tmp22
tmp89 = tl.load(in_ptr0 + (30 + x3), tmp88 & xmask, other=0.0)
tmp90 = tmp89 + tmp87
tmp91 = tmp81 & tmp29
tmp92 = tl.load(in_ptr0 + (31 + x3), tmp91 & xmask, other=0.0)
tmp93 = tmp92 + tmp90
tmp94 = tmp81 & tmp36
tmp95 = tl.load(in_ptr0 + (32 + x3), tmp94 & xmask, other=0.0)
tmp96 = tmp95 + tmp93
tmp97 = 2 + x1
tmp98 = tmp97 >= tmp1
tmp99 = tmp97 < tmp3
tmp100 = tmp98 & tmp99
tmp101 = tmp100 & tmp9
tmp102 = tl.load(in_ptr0 + (58 + x3), tmp101 & xmask, other=0.0)
tmp103 = tmp102 + tmp96
tmp104 = tmp100 & tmp15
tmp105 = tl.load(in_ptr0 + (59 + x3), tmp104 & xmask, other=0.0)
tmp106 = tmp105 + tmp103
tmp107 = tmp100 & tmp22
tmp108 = tl.load(in_ptr0 + (60 + x3), tmp107 & xmask, other=0.0)
tmp109 = tmp108 + tmp106
tmp110 = tmp100 & tmp29
tmp111 = tl.load(in_ptr0 + (61 + x3), tmp110 & xmask, other=0.0)
tmp112 = tmp111 + tmp109
tmp113 = tmp100 & tmp36
tmp114 = tl.load(in_ptr0 + (62 + x3), tmp113 & xmask, other=0.0)
tmp115 = tmp114 + tmp112
tmp116 = 4 + -2 * x0 + -2 * x1 + 2 * (32 * (32 <= 3 + x0) + (3 + x0) *
(3 + x0 < 32)) + 2 * (32 * (32 <= 3 + x1) + (3 + x1) * (3 + x1 < 32)
) + x0 * x1 + (32 * (32 <= 3 + x0) + (3 + x0) * (3 + x0 < 32)) * (
32 * (32 <= 3 + x1) + (3 + x1) * (3 + x1 < 32)) + -1 * x0 * (32 * (
32 <= 3 + x1) + (3 + x1) * (3 + x1 < 32)) + -1 * x1 * (32 * (32 <=
3 + x0) + (3 + x0) * (3 + x0 < 32))
tmp117 = tmp115 / tmp116
tmp119 = 0.0001
tmp120 = tmp117 * tmp119
tmp121 = 1.0
tmp122 = tmp120 + tmp121
tmp123 = 0.75
tmp124 = libdevice.pow(tmp122, tmp123)
tmp125 = tmp118 / tmp124
tmp126 = 2.0
tmp127 = tmp118 * tmp126
tl.store(out_ptr0 + x3, tmp117, xmask)
tl.store(out_ptr1 + x3, tmp125, xmask)
tl.store(out_ptr2 + x3, tmp127, xmask)
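# The avg_pool divisor tmp116 above looks elaborate, but for a 5x5 window with
# padding 2 on a 30x30 map the clamped expression evaluates to 5 per axis at every
# position, i.e. a constant 25 (count_include_pad semantics). out_ptr0 stores the
# local mean of x^2, out_ptr1 the normalized activation x / (1 + 0.0001 * mean)
# ** 0.75, and out_ptr2 stores 2 * x, apparently the derivative of x**2 saved for
# the backward pass.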
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 900 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_pow_4(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (30 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (31 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (32 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (60 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (61 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (62 + 2 * x0 + 60 * x1 + 900 * x2), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tmp16 * tmp16
tl.store(out_ptr0 + x3, tmp16, None)
tl.store(out_ptr1 + x3, tmp41, None)
tl.store(out_ptr2 + x3, tmp42, None)
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_5(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 14 % 14
x0 = xindex % 14
x3 = xindex
tmp118 = tl.load(in_ptr1 + x3, None)
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 14, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -2 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-30 + x3), tmp10, other=0.0)
tmp12 = -1 + x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-29 + x3), tmp16, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-28 + x3), tmp23, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = 1 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + (-27 + x3), tmp30, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = 2 + x0
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp5 & tmp36
tmp38 = tl.load(in_ptr0 + (-26 + x3), tmp37, other=0.0)
tmp39 = tmp38 + tmp32
tmp40 = -1 + x1
tmp41 = tmp40 >= tmp1
tmp42 = tmp40 < tmp3
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp9
tmp45 = tl.load(in_ptr0 + (-16 + x3), tmp44, other=0.0)
tmp46 = tmp45 + tmp39
tmp47 = tmp43 & tmp15
tmp48 = tl.load(in_ptr0 + (-15 + x3), tmp47, other=0.0)
tmp49 = tmp48 + tmp46
tmp50 = tmp43 & tmp22
tmp51 = tl.load(in_ptr0 + (-14 + x3), tmp50, other=0.0)
tmp52 = tmp51 + tmp49
tmp53 = tmp43 & tmp29
tmp54 = tl.load(in_ptr0 + (-13 + x3), tmp53, other=0.0)
tmp55 = tmp54 + tmp52
tmp56 = tmp43 & tmp36
tmp57 = tl.load(in_ptr0 + (-12 + x3), tmp56, other=0.0)
tmp58 = tmp57 + tmp55
tmp59 = x1
tmp60 = tmp59 >= tmp1
tmp61 = tmp59 < tmp3
tmp62 = tmp60 & tmp61
tmp63 = tmp62 & tmp9
tmp64 = tl.load(in_ptr0 + (-2 + x3), tmp63, other=0.0)
tmp65 = tmp64 + tmp58
tmp66 = tmp62 & tmp15
tmp67 = tl.load(in_ptr0 + (-1 + x3), tmp66, other=0.0)
tmp68 = tmp67 + tmp65
tmp69 = tmp62 & tmp22
tmp70 = tl.load(in_ptr0 + x3, tmp69, other=0.0)
tmp71 = tmp70 + tmp68
tmp72 = tmp62 & tmp29
tmp73 = tl.load(in_ptr0 + (1 + x3), tmp72, other=0.0)
tmp74 = tmp73 + tmp71
tmp75 = tmp62 & tmp36
tmp76 = tl.load(in_ptr0 + (2 + x3), tmp75, other=0.0)
tmp77 = tmp76 + tmp74
tmp78 = 1 + x1
tmp79 = tmp78 >= tmp1
tmp80 = tmp78 < tmp3
tmp81 = tmp79 & tmp80
tmp82 = tmp81 & tmp9
tmp83 = tl.load(in_ptr0 + (12 + x3), tmp82, other=0.0)
tmp84 = tmp83 + tmp77
tmp85 = tmp81 & tmp15
tmp86 = tl.load(in_ptr0 + (13 + x3), tmp85, other=0.0)
tmp87 = tmp86 + tmp84
tmp88 = tmp81 & tmp22
tmp89 = tl.load(in_ptr0 + (14 + x3), tmp88, other=0.0)
tmp90 = tmp89 + tmp87
tmp91 = tmp81 & tmp29
tmp92 = tl.load(in_ptr0 + (15 + x3), tmp91, other=0.0)
tmp93 = tmp92 + tmp90
tmp94 = tmp81 & tmp36
tmp95 = tl.load(in_ptr0 + (16 + x3), tmp94, other=0.0)
tmp96 = tmp95 + tmp93
tmp97 = 2 + x1
tmp98 = tmp97 >= tmp1
tmp99 = tmp97 < tmp3
tmp100 = tmp98 & tmp99
tmp101 = tmp100 & tmp9
tmp102 = tl.load(in_ptr0 + (26 + x3), tmp101, other=0.0)
tmp103 = tmp102 + tmp96
tmp104 = tmp100 & tmp15
tmp105 = tl.load(in_ptr0 + (27 + x3), tmp104, other=0.0)
tmp106 = tmp105 + tmp103
tmp107 = tmp100 & tmp22
tmp108 = tl.load(in_ptr0 + (28 + x3), tmp107, other=0.0)
tmp109 = tmp108 + tmp106
tmp110 = tmp100 & tmp29
tmp111 = tl.load(in_ptr0 + (29 + x3), tmp110, other=0.0)
tmp112 = tmp111 + tmp109
tmp113 = tmp100 & tmp36
tmp114 = tl.load(in_ptr0 + (30 + x3), tmp113, other=0.0)
tmp115 = tmp114 + tmp112
tmp116 = 4 + -2 * x0 + -2 * x1 + 2 * (16 * (16 <= 3 + x0) + (3 + x0) *
(3 + x0 < 16)) + 2 * (16 * (16 <= 3 + x1) + (3 + x1) * (3 + x1 < 16)
) + x0 * x1 + (16 * (16 <= 3 + x0) + (3 + x0) * (3 + x0 < 16)) * (
16 * (16 <= 3 + x1) + (3 + x1) * (3 + x1 < 16)) + -1 * x0 * (16 * (
16 <= 3 + x1) + (3 + x1) * (3 + x1 < 16)) + -1 * x1 * (16 * (16 <=
3 + x0) + (3 + x0) * (3 + x0 < 16))
tmp117 = tmp115 / tmp116
tmp119 = 0.0001
tmp120 = tmp117 * tmp119
tmp121 = 1.0
tmp122 = tmp120 + tmp121
tmp123 = 0.75
tmp124 = libdevice.pow(tmp122, tmp123)
tmp125 = tmp118 / tmp124
tmp126 = 2.0
tmp127 = tmp118 * tmp126
tl.store(out_ptr0 + x3, tmp117, None)
tl.store(out_ptr1 + x3, tmp125, None)
tl.store(out_ptr2 + x3, tmp127, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 196 % 384
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 196 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (14 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (15 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (16 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (28 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (29 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (30 + 2 * x0 + 28 * x1 + 196 * x2), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x3, tmp16, None)
tl.store(out_ptr1 + x3, tmp41, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (96, 3, 11, 11), (363, 121, 11, 1))
assert_size_stride(primals_2, (96,), (1,))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (256, 48, 5, 5), (1200, 25, 5, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (384, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (384, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_9, (384,), (1,))
assert_size_stride(primals_10, (256, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4096, 9216), (9216, 1))
assert_size_stride(primals_13, (4096,), (1,))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096,), (1,))
assert_size_stride(primals_16, (1000, 4096), (4096, 1))
assert_size_stride(primals_17, (1000,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 96, 62, 62), (369024, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 96, 62, 62), (371712, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1476096)](buf0, primals_2,
buf1, 1476096, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1),
torch.int8)
buf4 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_pow_1[grid(345600)](buf1,
buf2, buf3, buf4, 345600, XBLOCK=512, num_warps=8, num_stages=1)
buf5 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1),
torch.float32)
buf26 = empty_strided_cuda((4, 96, 30, 30), (86400, 900, 30, 1),
torch.float32)
triton_poi_fused_add_avg_pool2d_div_mul_pow_2[grid(345600)](buf4,
buf2, buf5, buf6, buf26, 345600, XBLOCK=512, num_warps=8,
num_stages=1)
del buf2
buf7 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf7, (4, 256, 30, 30), (230400, 900, 30, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_3[grid(921600)](buf8, primals_5,
921600, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1),
torch.float32)
buf10 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1),
torch.int8)
buf11 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_pow_4[grid(200704)](buf8,
buf9, buf10, buf11, 200704, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1),
torch.float32)
buf13 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1),
torch.float32)
buf25 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1),
torch.float32)
triton_poi_fused_add_avg_pool2d_div_mul_pow_5[grid(200704)](buf11,
buf9, buf12, buf13, buf25, 200704, XBLOCK=512, num_warps=8,
num_stages=1)
del buf9
buf14 = extern_kernels.convolution(buf13, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 384, 14, 14), (75264, 196, 14, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_6[grid(301056)](buf15, primals_7,
301056, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf16 = extern_kernels.convolution(buf15, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf16, (4, 384, 14, 14), (75264, 196, 14, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_6[grid(301056)](buf17, primals_9,
301056, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf18 = extern_kernels.convolution(buf17, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf18, (4, 256, 14, 14), (50176, 196, 14, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_7[grid(200704)](buf19, primals_11,
200704, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf20 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch.
float32)
buf21 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch.int8
)
triton_poi_fused_max_pool2d_with_indices_8[grid(36864)](buf19,
buf20, buf21, 36864, XBLOCK=256, num_warps=4, num_stages=1)
buf22 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf20, (4, 9216
), (9216, 1), 0), reinterpret_tensor(primals_12, (9216, 4096),
(1, 9216), 0), alpha=1, beta=1, out=buf22)
del primals_13
buf23 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.addmm(primals_15, buf22, reinterpret_tensor(
primals_14, (4096, 4096), (1, 4096), 0), alpha=1, beta=1, out=buf23
)
del primals_15
buf24 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
extern_kernels.addmm(primals_17, buf23, reinterpret_tensor(
primals_16, (4096, 1000), (1, 4096), 0), alpha=1, beta=1, out=buf24
)
del primals_17
return (buf24, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf4, buf5, buf6, buf8, buf10, buf11, buf12,
buf13, buf15, buf17, buf19, buf21, reinterpret_tensor(buf20, (4,
9216), (9216, 1), 0), buf22, buf23, primals_16, primals_14,
primals_12, buf25, buf26)
class LRN(nn.Module):
"""
Local Response Normalization
"""
def __init__(self, kernel_size, alpha, beta):
super(LRN, self).__init__()
self.avg_pool = nn.AvgPool2d(kernel_size=kernel_size, stride=1,
padding=int(kernel_size / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
div = x.pow(2)
div = self.avg_pool(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
class AlexNetNew(nn.Module):
def __init__(self, classes=1000):
"""
GPU : 2
"""
super(AlexNetNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=96, kernel_size=
11, stride=4, padding=0, bias=True)
self.relu1 = nn.ReLU()
self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.LRN1 = LRN(kernel_size=5, alpha=0.0001, beta=0.75)
self.conv2 = nn.Conv2d(in_channels=96, out_channels=256,
kernel_size=5, stride=1, padding=2, bias=True, groups=2)
self.relu2 = nn.ReLU()
self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.LRN2 = LRN(kernel_size=5, alpha=0.0001, beta=0.75)
self.conv3 = nn.Conv2d(in_channels=256, out_channels=384,
kernel_size=3, stride=1, padding=1, bias=True)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1,
bias=True, groups=2)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1,
groups=2)
self.relu5 = nn.ReLU()
self.max_pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.dense1 = nn.Linear(6 * 6 * 256, 4096)
self.dense2 = nn.Linear(4096, 4096)
self.dense3 = nn.Linear(4096, classes)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.dense1.weight
primals_13 = self.dense1.bias
primals_14 = self.dense2.weight
primals_15 = self.dense2.bias
primals_16 = self.dense3.weight
primals_17 = self.dense3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
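# Minimal usage sketch (hypothetical; `call` allocates CUDA buffers and asserts the
# traced 4x3x256x256 input shape, so a CUDA device and batch size 4 are required):
#   model = AlexNetNew().cuda()
#   logits = model(torch.rand(4, 3, 256, 256, device='cuda'))  # -> (4, 1000)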
 | jjeamin/obJDetection | AlexNet | false | 7,088 | ["MIT"] | 1 | eb7fbc410beb00fad1a6477e827e9ce2d8efbac5 | https://github.com/jjeamin/obJDetection/tree/eb7fbc410beb00fad1a6477e827e9ce2d8efbac5 | import torch
import torch.nn as nn
class LRN(nn.Module):
"""
Local Response Normalization
"""
def __init__(self, kernel_size, alpha, beta):
super().__init__()
self.avg_pool = nn.AvgPool2d(kernel_size=kernel_size, stride=1,
padding=int(kernel_size / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
div = x.pow(2)
div = self.avg_pool(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
class Model(nn.Module):
def __init__(self, classes=1000):
"""
GPU : 2
"""
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=96, kernel_size=
11, stride=4, padding=0, bias=True)
self.relu1 = nn.ReLU()
self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.LRN1 = LRN(kernel_size=5, alpha=0.0001, beta=0.75)
self.conv2 = nn.Conv2d(in_channels=96, out_channels=256,
kernel_size=5, stride=1, padding=2, bias=True, groups=2)
self.relu2 = nn.ReLU()
self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.LRN2 = LRN(kernel_size=5, alpha=0.0001, beta=0.75)
self.conv3 = nn.Conv2d(in_channels=256, out_channels=384,
kernel_size=3, stride=1, padding=1, bias=True)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1,
bias=True, groups=2)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1,
groups=2)
self.relu5 = nn.ReLU()
self.max_pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.dense1 = nn.Linear(6 * 6 * 256, 4096)
self.dense2 = nn.Linear(4096, 4096)
self.dense3 = nn.Linear(4096, classes)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = self.max_pool1(x)
x = self.LRN1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.max_pool2(x)
x = self.LRN2(x)
x = self.conv3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.relu4(x)
x = self.conv5(x)
x = self.relu5(x)
x = self.max_pool3(x)
x = x.view(-1, 6 * 6 * 256)
x = self.dense1(x)
x = self.dense2(x)
x = self.dense3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 256, 256])]
def get_init_inputs():
return []
|
AttentionConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xy/cxy77nk6tbfgwqyn5amat6tjjfsjhqlutzehwqrmr3xcuh7sc46s.py
# Topologically Sorted Source Nodes: [v_out_1, k_out_2, einsum], Original ATen: [aten.unfold, aten.cat, aten.mul]
# Source node to ATen node mapping:
# einsum => mul_1
# k_out_2 => cat
# v_out_1 => unfold_3
# Graph fragment:
# %unfold_3 : [num_users=2] = call_function[target=torch.ops.aten.unfold.default](args = (%unfold_2, 3, 4, 1), kwargs = {})
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add, %add_1], 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %permute_1), kwargs = {})
triton_poi_fused_cat_mul_unfold_0 = async_compile.triton('triton_poi_fused_cat_mul_unfold_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_mul_unfold_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_mul_unfold_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x3 = (xindex // 16) % 4
x4 = (xindex // 64)
x5 = xindex % 16
x2 = (xindex // 4) % 4
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp20 = tl.load(in_ptr3 + (x0), xmask)
tmp1 = x3
tmp2 = tl.full([1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1], 2, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + (x5 + (16*x3) + (64*x4)), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x2 + (4*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tmp11 = tmp1 >= tmp4
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp1 < tmp12
tmp14 = tl.load(in_ptr0 + (32 + x5 + (16*((-2) + x3)) + (64*x4)), tmp11 & xmask, other=0.0)
tmp15 = tl.load(in_ptr2 + (x1 + (4*((-2) + x3))), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp5, tmp10, tmp18)
tmp21 = 0.0
tmp22 = tmp19 >= tmp21
tmp23 = 1.0
tmp24 = -1.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 - tmp26
tmp28 = tmp25 * tmp19
tmp29 = tmp27 * tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp30 / tmp30
tmp32 = tmp31 * tmp0
tl.store(in_out_ptr0 + (x0), tmp0, xmask)
tl.store(out_ptr0 + (x0), tmp19, xmask)
tl.store(out_ptr1 + (x0), tmp32, xmask)
''', device_str='cuda')
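# Note on the kernel above: with the 4x4 test input, kernel_size=4 and no padding,
# each unfold yields a single window, so the softmax runs over a length-1 axis and
# the compiler reduces it to the identity (tmp27 = tmp26 - tmp26 -> 0, giving
# exp(0) = 1 and tmp30 / tmp30 = 1); the attention weights in this trace are
# therefore all ones and the output equals the value projection.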
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (2, 1, 1, 4, 1), (4, 4, 4, 1, 1))
assert_size_stride(primals_6, (2, 1, 1, 1, 4), (4, 4, 4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [q_out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [k_out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [v_out], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 4, 4), (64, 16, 16, 4, 4, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 1, 1, 4, 4), (64, 16, 16, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_out_1, k_out_2, einsum], Original ATen: [aten.unfold, aten.cat, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_mul_unfold_0.run(buf3, buf1, primals_5, primals_6, buf0, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf1
del primals_5
del primals_6
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_2, primals_3, primals_4, buf0, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, 1, 1, 4, 1), (4, 4, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 1, 1, 1, 4), (4, 4, 4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class AttentionConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConv, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
assert self.out_channels % self.groups == 0, 'out_channels must be divisible by groups (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
padded_x = F.pad(x, [self.padding, self.padding, self.padding, self
.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
k_out = k_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
v_out = v_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
q_out = q_out.view(batch, self.groups, self.out_channels // self.
groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
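# For the test configuration above (4x4 inputs, kernel_size=4, padding=0) the
# unfolds produce exactly one window per image, the flattened window axis has
# length 1, and softmax over it is a no-op, so the layer returns the value
# projection. With suitable padding each output pixel would instead attend over
# k = kernel_size**2 neighbours.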
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_mul_unfold_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x3 = xindex // 16 % 4
x4 = xindex // 64
x5 = xindex % 16
x2 = xindex // 4 % 4
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp20 = tl.load(in_ptr3 + x0, xmask)
tmp1 = x3
tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 2, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + (x5 + 16 * x3 + 64 * x4), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x2 + 4 * x3), tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tmp11 = tmp1 >= tmp4
tl.full([1], 4, tl.int64)
tmp14 = tl.load(in_ptr0 + (32 + x5 + 16 * (-2 + x3) + 64 * x4), tmp11 &
xmask, other=0.0)
tmp15 = tl.load(in_ptr2 + (x1 + 4 * (-2 + x3)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp5, tmp10, tmp18)
tmp21 = 0.0
tmp22 = tmp19 >= tmp21
tmp23 = 1.0
tmp24 = -1.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 - tmp26
tmp28 = tmp25 * tmp19
tmp29 = tmp27 * tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp30 / tmp30
tmp32 = tmp31 * tmp0
tl.store(in_out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr0 + x0, tmp19, xmask)
tl.store(out_ptr1 + x0, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (2, 1, 1, 4, 1), (4, 4, 4, 1, 1))
assert_size_stride(primals_6, (2, 1, 1, 1, 4), (4, 4, 4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 4, 4), (64, 16, 16, 4,
4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 1, 1, 4, 4), (64, 16, 16, 16, 4, 1
), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_mul_unfold_0[grid(256)](buf3, buf1, primals_5,
primals_6, buf0, buf4, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del primals_5
del primals_6
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_2, primals_3, primals_4, buf0, buf3, buf4
class AttentionConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConvNew, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
assert self.out_channels % self.groups == 0, 'out_channels must be divisible by groups (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def forward(self, input_0):
primals_5 = self.rel_h
primals_6 = self.rel_w
primals_2 = self.key_conv.weight
primals_3 = self.query_conv.weight
primals_4 = self.value_conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
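# Minimal usage sketch (hypothetical; the compiled `call` asserts the traced
# 4x4x4x4 shapes and runs on CUDA):
#   layer = AttentionConvNew(4, 4, kernel_size=4).cuda()
#   y = layer(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 4, 4, 4)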
 | likui01/DRFuser | AttentionConv | false | 7,089 | ["MIT"] | 1 | 06539a6fa9203b1e9dc9d4d944cfcd5f7603f5e9 | https://github.com/likui01/DRFuser/tree/06539a6fa9203b1e9dc9d4d944cfcd5f7603f5e9 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super().__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
assert self.out_channels % self.groups == 0, 'out_channels must be divisible by groups (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
padded_x = F.pad(x, [self.padding, self.padding, self.padding, self
.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
k_out = k_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
v_out = v_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
q_out = q_out.view(batch, self.groups, self.out_channels // self.
groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Split | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cp/ccplpqa4hjz3n3j5jujs47gyz26ndm5naxvwku5nkktkkpfuh4um.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ie/cie6ockbfnni22s6gh6b3qk2guxsbx7g4f3xzhpygcbbqvw47fbq.py
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x2 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(arg0_1, buf1, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Split(nn.Module):
def __init__(self):
super(Split, self).__init__()
def forward(self, x):
n = int(x.size(1) / 2)
x1 = x[:, :n, :, :].contiguous()
x2 = x[:, n:, :, :].contiguous()
return x1, x2
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
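# Round-trip sketch (added for illustration): forward halves the channel
# dimension and inverse concatenates the halves back, so inverse(*forward(x))
# recovers x exactly.
#
#   m = Split()
#   x = torch.rand([4, 4, 4, 4])
#   x1, x2 = m(x)                          # each of shape (4, 2, 4, 4)
#   assert torch.equal(m.inverse(x1, x2), x)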
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
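    # 128 outputs = 4 batches x 32 values (channels 0..1 of each 4-channel map);
    # x1 is the batch index, and the source stride of 64 skips each batch's
    # second channel half.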
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
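    # Same layout as triton_poi_fused_clone_0, but the +32 offset below reads
    # channels 2..3 (the second half of each batch's 64-element block).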
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(128)](arg0_1, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0, buf1
class SplitNew(nn.Module):
def __init__(self):
super(SplitNew, self).__init__()
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| lingzenan/invertible-resnet | Split | false | 7,090 | [
"MIT"
] | 1 | 57b1c0de51a885aed074b77628f3b0c85c548e70 | https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
n = int(x.size(1) / 2)
x1 = x[:, :n, :, :].contiguous()
x2 = x[:, n:, :, :].contiguous()
return x1, x2
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
squeeze | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 64, 64), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class squeeze(nn.Module):
def __init__(self, block_size):
super(squeeze, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def inverse(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, d_height, d_width, d_depth = output.size()
s_depth = int(d_depth / self.block_size_sq)
s_width = int(d_width * self.block_size)
s_height = int(d_height * self.block_size)
        t_1 = output.contiguous().view(batch_size, d_height, d_width,
                                       self.block_size_sq, s_depth)
        spl = t_1.split(self.block_size, 3)
        stack = [t_t.contiguous().view(batch_size, d_height, s_width, s_depth)
                 for t_t in spl]
        output = (torch.stack(stack, 0).transpose(0, 1)
                  .permute(0, 2, 1, 3, 4).contiguous()
                  .view(batch_size, s_height, s_width, s_depth))
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, s_height, _s_width, s_depth = output.size()
d_depth = s_depth * self.block_size_sq
d_height = int(s_height / self.block_size)
t_1 = output.split(self.block_size, 2)
        stack = [t_t.contiguous().view(batch_size, d_height, d_depth)
                 for t_t in t_1]
output = torch.stack(stack, 1)
output = output.permute(0, 2, 1, 3)
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'block_size': 4}]
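# Shape sketch (added for illustration): with block_size=4, forward is a
# space-to-depth rearrangement, (N, C, H, W) -> (N, C * 16, H / 4, W / 4),
# and inverse undoes it.
#
#   m = squeeze(4)
#   y = m(torch.rand([4, 4, 4, 4]))        # -> torch.Size([4, 64, 1, 1])
#   assert m.inverse(y).shape == (4, 4, 4, 4)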
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
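    # 64 x 4 launch: y0 walks the 16 spatial positions, y1 the batch and x2 the
    # 4 input channels; storing at (x2 + 4 * y3) makes the channel index fastest,
    # so the reinterpret_tensor in call() yields the squeezed (4, 64, 1, 1) view.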
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 64, 64), 0),
class squeezeNew(nn.Module):
def __init__(self, block_size):
super(squeezeNew, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def inverse(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, d_height, d_width, d_depth = output.size()
s_depth = int(d_depth / self.block_size_sq)
s_width = int(d_width * self.block_size)
s_height = int(d_height * self.block_size)
        t_1 = output.contiguous().view(batch_size, d_height, d_width,
                                       self.block_size_sq, s_depth)
        spl = t_1.split(self.block_size, 3)
        stack = [t_t.contiguous().view(batch_size, d_height, s_width, s_depth)
                 for t_t in spl]
        output = (torch.stack(stack, 0).transpose(0, 1)
                  .permute(0, 2, 1, 3, 4).contiguous()
                  .view(batch_size, s_height, s_width, s_depth))
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| lingzenan/invertible-resnet | squeeze | false | 7,091 | [
"MIT"
] | 1 | 57b1c0de51a885aed074b77628f3b0c85c548e70 | https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, block_size):
super().__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def inverse(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, d_height, d_width, d_depth = output.size()
s_depth = int(d_depth / self.block_size_sq)
s_width = int(d_width * self.block_size)
s_height = int(d_height * self.block_size)
        t_1 = output.contiguous().view(batch_size, d_height, d_width,
                                       self.block_size_sq, s_depth)
        spl = t_1.split(self.block_size, 3)
        stack = [t_t.contiguous().view(batch_size, d_height, s_width, s_depth)
                 for t_t in spl]
        output = (torch.stack(stack, 0).transpose(0, 1)
                  .permute(0, 2, 1, 3, 4).contiguous()
                  .view(batch_size, s_height, s_width, s_depth))
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, s_height, _s_width, s_depth = output.size()
d_depth = s_depth * self.block_size_sq
d_height = int(s_height / self.block_size)
t_1 = output.split(self.block_size, 2)
        stack = [t_t.contiguous().view(batch_size, d_height, d_depth)
                 for t_t in t_1]
output = torch.stack(stack, 1)
output = output.permute(0, 2, 1, 3)
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Conv2dZeroInit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/b5/cb5h536lfxehnv2ezobudfl5wugu2y6mu444yw7yei4n22rp33zu.py
# Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
# Source node to ATen node mapping:
# exp => exp
# mul => mul
# mul_1 => mul_1
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %exp), kwargs = {})
triton_poi_fused_convolution_exp_mul_0 = async_compile.triton('triton_poi_fused_convolution_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0.run(buf1, primals_2, primals_4, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv2dZeroInit(nn.Conv2d):
    def __init__(self, channels_in, channels_out, filter_size, stride=1,
                 padding=0, logscale=3.0):
        super().__init__(channels_in, channels_out, filter_size,
                         stride=stride, padding=padding)
        self.register_parameter('logs',
                                nn.Parameter(torch.zeros(channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
out = super().forward(input)
return out * torch.exp(self.logs * self.logscale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4, 'filter_size': 4}]
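# Behaviour sketch (added for illustration): reset_parameters zeroes weight and
# bias, so the layer initially outputs zeros for any input; training then scales
# activations by the learned per-channel exp(3 * logs) factor, as in Glow-style
# zero-initialised convolutions.
#
#   m = Conv2dZeroInit(4, 4, 4)
#   y = m(torch.rand([4, 4, 4, 4]))        # -> zeros of shape (4, 4, 1, 1)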
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
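    # x0 is the output-channel index; the kernel fuses the conv bias add (tmp2)
    # with the per-channel rescale exp(3 * logs) from Conv2dZeroInit.forward.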
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0[grid(16)](buf1, primals_2,
primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, primals_4, buf1
class Conv2dZeroInitNew(nn.Conv2d):
    def __init__(self, channels_in, channels_out, filter_size, stride=1,
                 padding=0, logscale=3.0):
        super().__init__(channels_in, channels_out, filter_size,
                         stride=stride, padding=padding)
        self.register_parameter('logs',
                                nn.Parameter(torch.zeros(channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_4 = self.logs
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| lingzenan/invertible-resnet | Conv2dZeroInit | false | 7,092 | [
"MIT"
] | 1 | 57b1c0de51a885aed074b77628f3b0c85c548e70 | https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70 | import torch
import torch.nn as nn
class Model(nn.Conv2d):
    def __init__(self, channels_in, channels_out, filter_size, stride=1,
                 padding=0, logscale=3.0):
        super().__init__(channels_in, channels_out, filter_size,
                         stride=stride, padding=padding)
        self.register_parameter('logs',
                                nn.Parameter(torch.zeros(channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
out = super().forward(input)
return out * torch.exp(self.logs * self.logscale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
DQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sr/csrxdjbtbkq5mhx4lx76hdeti625uy52jalpuc5xjwghomvl635m.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yg/cyg4mfxxegclwowpan5hfbi5qulziofwhyotfohv246hvg36ojqn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 10240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 160
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/az/cazc23jb3eudmchjh2ve6ybhsu2s3xqvb3rzpx33nwlg7uxykld6.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k7/ck7ubdlr7lmmf3n3cyqqh3ars6whwam3nlccuztiqcxbmjovqjpr.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (160, 200), (200, 1))
assert_size_stride(primals_5, (160, ), (1, ))
assert_size_stride(primals_6, (120, 160), (160, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (60, 120), (120, 1))
assert_size_stride(primals_9, (60, ), (1, ))
assert_size_stride(primals_10, (4, 60), (60, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf0 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf12, 12800, grid=grid(12800), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 160), (160, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 160), (1, 200), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 160), (2560, 640, 160, 1), 0); del buf2 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 160), (2560, 640, 160, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf11, 10240, grid=grid(10240), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 160), (160, 1), 0), reinterpret_tensor(primals_6, (160, 120), (1, 160), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 120), (1920, 480, 120, 1), 0); del buf4 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 120), (1920, 480, 120, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf10, 7680, grid=grid(7680), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 120), (120, 1), 0), reinterpret_tensor(primals_8, (120, 60), (1, 120), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 60), (960, 240, 60, 1), 0); del buf6 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf7, primals_9, buf9, 3840, grid=grid(3840), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 60), (60, 1), 0), reinterpret_tensor(primals_10, (60, 4), (1, 60), 0), alpha=1, beta=1, out=buf8)
del primals_11
return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(buf3, (64, 160), (160, 1), 0), reinterpret_tensor(buf5, (64, 120), (120, 1), 0), reinterpret_tensor(buf7, (64, 60), (60, 1), 0), primals_10, buf9, primals_8, buf10, primals_6, buf11, primals_4, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((160, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((160, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 160), (160, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((60, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((60, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 60), (60, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class DQN(nn.Module):
    def __init__(self, observation_size, action_size,
                 H1=200, H2=160, H3=120, H4=60):
        """
        :param observation_size: Size of belief as defined in belief_agent.py
        :param action_size: Model has 1 output for every single possible card in the deck.
        :param H1: size of hidden layer 1
        :param H2: size of hidden layer 2
        :param H3: size of hidden layer 3
        :param H4: size of hidden layer 4
        """
super().__init__()
self.fc1 = torch.nn.Linear(observation_size, H1)
self.fc2 = torch.nn.Linear(H1, H2)
self.fc3 = torch.nn.Linear(H2, H3)
self.fc4 = torch.nn.Linear(H3, H4)
self.fc5 = torch.nn.Linear(H4, action_size)
def forward(self, observation):
"""
Maps observation to action values.
"""
x = F.relu(self.fc1(observation))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
return self.fc5(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'observation_size': 4, 'action_size': 4}]
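# Usage sketch (added for illustration): the network is a plain 5-layer ReLU MLP
# applied over the last dimension, so any leading batch shape is preserved.
#
#   net = DQN(observation_size=4, action_size=4)
#   q = net(torch.rand([4, 4, 4, 4]))      # -> torch.Size([4, 4, 4, 4])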
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
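    # Fused bias-add + ReLU for fc1 (hidden size 200); out_ptr0 records the
    # (activation <= 0) mask that threshold_backward consumes in the backward pass.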
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 160
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 7680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (160, 200), (200, 1))
assert_size_stride(primals_5, (160,), (1,))
assert_size_stride(primals_6, (120, 160), (160, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (60, 120), (120, 1))
assert_size_stride(primals_9, (60,), (1,))
assert_size_stride(primals_10, (4, 60), (60, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(12800)](buf1,
primals_2, buf12, 12800, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 160), (160, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 160), (1, 200), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 160), (2560, 640, 160, 1), 0)
del buf2
buf11 = empty_strided_cuda((4, 4, 4, 160), (2560, 640, 160, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(10240)](buf3,
primals_5, buf11, 10240, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 160), (160, 1), 0),
reinterpret_tensor(primals_6, (160, 120), (1, 160), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 120), (1920, 480, 120, 1), 0)
del buf4
buf10 = empty_strided_cuda((4, 4, 4, 120), (1920, 480, 120, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(7680)](buf5,
primals_7, buf10, 7680, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 120), (120, 1), 0),
reinterpret_tensor(primals_8, (120, 60), (1, 120), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 60), (960, 240, 60, 1), 0)
del buf6
buf9 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(3840)](buf7,
primals_9, buf9, 3840, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 60),
(60, 1), 0), reinterpret_tensor(primals_10, (60, 4), (1, 60), 0
), alpha=1, beta=1, out=buf8)
del primals_11
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 200), (200, 1), 0
), reinterpret_tensor(buf3, (64, 160), (160, 1), 0
), reinterpret_tensor(buf5, (64, 120), (120, 1), 0
), reinterpret_tensor(buf7, (64, 60), (60, 1), 0
), primals_10, buf9, primals_8, buf10, primals_6, buf11, primals_4, buf12
class DQNNew(nn.Module):
    def __init__(self, observation_size, action_size,
                 H1=200, H2=160, H3=120, H4=60):
        """
        :param observation_size: Size of belief as defined in belief_agent.py
        :param action_size: Model has 1 output for every single possible card in the deck.
        :param H1: size of hidden layer 1
        :param H2: size of hidden layer 2
        :param H3: size of hidden layer 3
        :param H4: size of hidden layer 4
        """
super().__init__()
self.fc1 = torch.nn.Linear(observation_size, H1)
self.fc2 = torch.nn.Linear(H1, H2)
self.fc3 = torch.nn.Linear(H2, H3)
self.fc4 = torch.nn.Linear(H3, H4)
self.fc5 = torch.nn.Linear(H4, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| lilianluong/multitask-card-games | DQN | false | 7,093 | [
"MIT"
] | 1 | ae32e85583c61cc27a44946a6b5fa7c1e2c152ff | https://github.com/lilianluong/multitask-card-games/tree/ae32e85583c61cc27a44946a6b5fa7c1e2c152ff | import torch
import torch.nn.functional as F
from torch import nn
class Model(nn.Module):
    def __init__(self, observation_size, action_size,
                 H1=200, H2=160, H3=120, H4=60):
        """
        :param observation_size: Size of belief as defined in belief_agent.py
        :param action_size: Model has 1 output for every single possible card in the deck.
        :param H1: size of hidden layer 1
        :param H2: size of hidden layer 2
        :param H3: size of hidden layer 3
        :param H4: size of hidden layer 4
        """
super().__init__()
self.fc1 = torch.nn.Linear(observation_size, H1)
self.fc2 = torch.nn.Linear(H1, H2)
self.fc3 = torch.nn.Linear(H2, H3)
self.fc4 = torch.nn.Linear(H3, H4)
self.fc5 = torch.nn.Linear(H4, action_size)
def forward(self, observation):
"""
Maps observation to action values.
"""
x = F.relu(self.fc1(observation))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
return self.fc5(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MaxMinGroup | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6p/c6p6sxn6eyjkfrrvgdkwlhviqvdquvcktva3443bsgc22ofn2dk7.py
# Topologically Sorted Source Nodes: [maxmin], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# maxmin => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr0 + ((4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (1 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tl.load(in_ptr0 + (2 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (3 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + (x3), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [maxmin], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
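# Hedged reading of the kernel above (comment added for clarity, not emitted by
# Inductor): each thread reduces one contiguous group of 4 elements, keeping a
# running maximum (tmp5..tmp11) and a running minimum (tmp17..tmp23), then
# tl.where selects the max for output channels 0-3 and the min for channels
# 4-7. An eager-mode equivalent for the (4, 4, 4, 4) input would be:
#     ref = torch.cat((x.view(4, 4, 4, 1, 4).max(-1).values,
#                      x.view(4, 4, 4, 1, 4).min(-1).values), dim=1)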
| import torch
import torch.nn as nn
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class MaxMinGroup(nn.Module):
def __init__(self, group_size, axis=-1):
super(MaxMinGroup, self).__init__()
self.group_size = group_size
self.axis = axis
def forward(self, x):
maxes = maxout_by_group(x, self.group_size, self.axis)
mins = minout_by_group(x, self.group_size, self.axis)
maxmin = torch.cat((maxes, mins), dim=1)
return maxmin
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'group_size': 4}]
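# Hedged shape walk-through (added; `_check_maxmin_shape` is a hypothetical
# helper): with group_size=4 and axis=-1, process_maxmin_groupsize turns
# [4, 4, 4, 4] into [4, 4, 4, -1, 4]; max and min each reduce the trailing
# group dim to (4, 4, 4, 1), and the dim=1 cat yields the (4, 8, 4, 1) buffer
# the compiled kernel writes.
def _check_maxmin_shape():
    out = MaxMinGroup(group_size=4)(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 8, 4, 1)
    return out.shape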
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 &
xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * (-4 + x1) + 64 * x2), tmp14 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + x3, tmp26, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class MaxMinGroupNew(nn.Module):
def __init__(self, group_size, axis=-1):
super(MaxMinGroupNew, self).__init__()
self.group_size = group_size
self.axis = axis
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
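# Hedged parity check (added; requires a CUDA device, which the fixed-shape
# call() path assumes): the compiled module should agree with the eager
# max/min-and-cat formulation on the (4, 4, 4, 4) input baked into the kernel.
def _check_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    compiled = MaxMinGroupNew(group_size=4)(x)
    eager = torch.cat((x.view(4, 4, 4, 1, 4).max(-1).values,
                       x.view(4, 4, 4, 1, 4).min(-1).values), dim=1)
    assert torch.allclose(compiled, eager)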
| lingzenan/invertible-resnet | MaxMinGroup | false | 7,094 | [
"MIT"
] | 1 | 57b1c0de51a885aed074b77628f3b0c85c548e70 | https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70 | import torch
import torch.nn as nn
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class Model(nn.Module):
def __init__(self, group_size, axis=-1):
super().__init__()
self.group_size = group_size
self.axis = axis
def forward(self, x):
maxes = maxout_by_group(x, self.group_size, self.axis)
mins = minout_by_group(x, self.group_size, self.axis)
maxmin = torch.cat((maxes, mins), dim=1)
return maxmin
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SpatialGate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3k/c3kqucdvk4o4zpjqokxiye7xneshc77mutxurbuxezpwyz5n5xoj.py
# Topologically Sorted Source Nodes: [tanh, gate, gate_1, x, output], Original ATen: [aten.tanh, aten.clamp, aten.view, aten.mul]
# Source node to ATen node mapping:
# gate => clamp_min
# gate_1 => view_1
# output => view_2
# tanh => tanh
# x => mul
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%tanh, 0), kwargs = {})
# %view_1 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clamp_min, [4, -1, 4, 4]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %view_2 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%mul, [4, -1, 4, 4]), kwargs = {})
triton_poi_fused_clamp_mul_tanh_view_1 = async_compile.triton('triton_poi_fused_clamp_mul_tanh_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_mul_tanh_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_mul_tanh_view_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = libdevice.tanh(tmp1)
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, gate, gate_1, x, output], Original ATen: [aten.tanh, aten.clamp, aten.view, aten.mul]
triton_poi_fused_clamp_mul_tanh_view_1.run(primals_4, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
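# Hedged reading of the two kernels above (comment added for clarity): kernel 0
# adds the scalar conv bias in place on the (4, 1, 4, 4) conv output; kernel 1
# fuses gate = tanh(conv_out).clamp(min=0) with the broadcast multiply, so buf2
# equals the eager data_input * torch.tanh(conv(gate_input)).clamp(min=0).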
| import math
import torch
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
class SpatialGate(nn.Module):
def __init__(self, in_channels: 'int', num_groups: 'int'=1, kernel_size:
'int'=1, padding: 'int'=0, stride: 'int'=1, gate_activation: 'str'=
'ReTanH', gate_activation_kargs: 'dict'=None, get_running_cost:
'callable'=None):
super(SpatialGate, self).__init__()
self.num_groups = num_groups
self.gate_conv = nn.Conv2d(in_channels, num_groups, kernel_size,
padding=padding, stride=stride)
self.gate_activation = gate_activation
self.gate_activation_kargs = gate_activation_kargs
if gate_activation == 'ReTanH':
self.gate_activate = lambda x: torch.tanh(x).clamp(min=0)
elif gate_activation == 'Sigmoid':
self.gate_activate = lambda x: torch.sigmoid(x)
elif gate_activation == 'GeReTanH':
assert 'tau' in gate_activation_kargs
tau = gate_activation_kargs['tau']
ttau = math.tanh(tau)
self.gate_activate = lambda x: ((torch.tanh(x - tau) + ttau) /
(1 + ttau)).clamp(min=0)
else:
raise NotImplementedError()
self.get_running_cost = get_running_cost
self.running_cost = None
self.init_parameters()
def init_parameters(self, init_gate=0.99):
if self.gate_activation == 'ReTanH':
bias_value = 0.5 * math.log((1 + init_gate) / (1 - init_gate))
elif self.gate_activation == 'Sigmoid':
bias_value = 0.5 * math.log(init_gate / (1 - init_gate))
elif self.gate_activation == 'GeReTanH':
tau = self.gate_activation_kargs['tau']
bias_value = 0.5 * math.log((1 + init_gate * math.exp(2 * tau)) /
(1 - init_gate))
nn.init.normal_(self.gate_conv.weight, std=0.01)
nn.init.constant_(self.gate_conv.bias, bias_value)
def encode(self, *inputs):
outputs = [x.view(x.shape[0] * self.num_groups, -1, *x.shape[2:]) for
x in inputs]
return outputs
def decode(self, *inputs):
outputs = [x.view(x.shape[0] // self.num_groups, -1, *x.shape[2:]) for
x in inputs]
return outputs
def update_running_cost(self, gate):
if self.get_running_cost is not None:
cost = self.get_running_cost(gate)
if self.running_cost is not None:
self.running_cost = [(x + y) for x, y in zip(self.
running_cost, cost)]
else:
self.running_cost = cost
def clear_running_cost(self):
self.running_cost = None
def forward(self, data_input, gate_input, masked_func=None):
gate = self.gate_activate(self.gate_conv(gate_input))
self.update_running_cost(gate)
if masked_func is not None:
data_input = masked_func(data_input, gate)
data, gate = self.encode(data_input, gate)
output, = self.decode(data * gate)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
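# Hedged usage sketch (added; `_demo_spatial_gate` is a hypothetical helper):
# with the defaults (num_groups=1, ReTanH), the gate conv maps the
# (4, 4, 4, 4) gate_input to a (4, 1, 4, 4) gate that broadcasts over
# data_input, so the output keeps the input shape.
def _demo_spatial_gate():
    sg = SpatialGate(in_channels=4)
    data, gate_in = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    out = sg(data, gate_in)
    assert out.shape == data.shape
    return out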
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_clamp_mul_tanh_view_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = libdevice.tanh(tmp1)
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_mul_tanh_view_1[grid(256)](primals_4, buf1,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, primals_4, buf1
class SpatialGateNew(nn.Module):
def __init__(self, in_channels: 'int', num_groups: 'int'=1, kernel_size:
'int'=1, padding: 'int'=0, stride: 'int'=1, gate_activation: 'str'=
'ReTanH', gate_activation_kargs: 'dict'=None, get_running_cost:
'callable'=None):
super(SpatialGateNew, self).__init__()
self.num_groups = num_groups
self.gate_conv = nn.Conv2d(in_channels, num_groups, kernel_size,
padding=padding, stride=stride)
self.gate_activation = gate_activation
self.gate_activation_kargs = gate_activation_kargs
if gate_activation == 'ReTanH':
self.gate_activate = lambda x: torch.tanh(x).clamp(min=0)
elif gate_activation == 'Sigmoid':
self.gate_activate = lambda x: torch.sigmoid(x)
elif gate_activation == 'GeReTanH':
assert 'tau' in gate_activation_kargs
tau = gate_activation_kargs['tau']
ttau = math.tanh(tau)
self.gate_activate = lambda x: ((torch.tanh(x - tau) + ttau) /
(1 + ttau)).clamp(min=0)
else:
raise NotImplementedError()
self.get_running_cost = get_running_cost
self.running_cost = None
self.init_parameters()
def init_parameters(self, init_gate=0.99):
if self.gate_activation == 'ReTanH':
bias_value = 0.5 * math.log((1 + init_gate) / (1 - init_gate))
elif self.gate_activation == 'Sigmoid':
bias_value = 0.5 * math.log(init_gate / (1 - init_gate))
elif self.gate_activation == 'GeReTanH':
tau = self.gate_activation_kargs['tau']
bias_value = 0.5 * math.log((1 + init_gate * math.exp(2 * tau)) /
(1 - init_gate))
nn.init.normal_(self.gate_conv.weight, std=0.01)
nn.init.constant_(self.gate_conv.bias, bias_value)
def encode(self, *inputs):
outputs = [x.view(x.shape[0] * self.num_groups, -1, *x.shape[2:]) for
x in inputs]
return outputs
def decode(self, *inputs):
outputs = [x.view(x.shape[0] // self.num_groups, -1, *x.shape[2:]) for
x in inputs]
return outputs
def update_running_cost(self, gate):
if self.get_running_cost is not None:
cost = self.get_running_cost(gate)
if self.running_cost is not None:
self.running_cost = [(x + y) for x, y in zip(self.
running_cost, cost)]
else:
self.running_cost = cost
def clear_running_cost(self):
self.running_cost = None
def forward(self, input_0, input_1):
primals_1 = self.gate_conv.weight
primals_2 = self.gate_conv.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
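# Hedged sanity check (added; `_check_bias_init` is a hypothetical helper): for
# the default ReTanH activation, init_parameters sets the conv bias to
# atanh(init_gate) = 0.5 * log((1 + g) / (1 - g)), so with the near-zero
# weight init the gate starts almost fully open (tanh(bias) ~= 0.99).
def _check_bias_init():
    b = 0.5 * math.log((1 + 0.99) / (1 - 0.99))
    assert abs(math.tanh(b) - 0.99) < 1e-9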
| lingtengqiu/LearnableTreeFilterV2 | SpatialGate | false | 7,095 | [
"Apache-2.0"
] | 1 | 3814a5a84c0a5c33d6538749eaf5aed4827366de | https://github.com/lingtengqiu/LearnableTreeFilterV2/tree/3814a5a84c0a5c33d6538749eaf5aed4827366de | import math
import torch
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
class Model(nn.Module):
def __init__(self, in_channels: 'int', num_groups: 'int'=1, kernel_size:
'int'=1, padding: 'int'=0, stride: 'int'=1, gate_activation: 'str'=
'ReTanH', gate_activation_kargs: 'dict'=None, get_running_cost:
'callable'=None):
super().__init__()
self.num_groups = num_groups
self.gate_conv = nn.Conv2d(in_channels, num_groups, kernel_size,
padding=padding, stride=stride)
self.gate_activation = gate_activation
self.gate_activation_kargs = gate_activation_kargs
if gate_activation == 'ReTanH':
self.gate_activate = lambda x: torch.tanh(x).clamp(min=0)
elif gate_activation == 'Sigmoid':
self.gate_activate = lambda x: torch.sigmoid(x)
elif gate_activation == 'GeReTanH':
assert 'tau' in gate_activation_kargs
tau = gate_activation_kargs['tau']
ttau = math.tanh(tau)
self.gate_activate = lambda x: ((torch.tanh(x - tau) + ttau) /
(1 + ttau)).clamp(min=0)
else:
raise NotImplementedError()
self.get_running_cost = get_running_cost
self.running_cost = None
self.init_parameters()
def init_parameters(self, init_gate=0.99):
if self.gate_activation == 'ReTanH':
bias_value = 0.5 * math.log((1 + init_gate) / (1 - init_gate))
elif self.gate_activation == 'Sigmoid':
bias_value = 0.5 * math.log(init_gate / (1 - init_gate))
elif self.gate_activation == 'GeReTanH':
tau = self.gate_activation_kargs['tau']
bias_value = 0.5 * math.log((1 + init_gate * math.exp(2 * tau)) /
(1 - init_gate))
nn.init.normal_(self.gate_conv.weight, std=0.01)
nn.init.constant_(self.gate_conv.bias, bias_value)
def encode(self, *inputs):
outputs = [x.view(x.shape[0] * self.num_groups, -1, *x.shape[2:]) for
x in inputs]
return outputs
def decode(self, *inputs):
outputs = [x.view(x.shape[0] // self.num_groups, -1, *x.shape[2:]) for
x in inputs]
return outputs
def update_running_cost(self, gate):
if self.get_running_cost is not None:
cost = self.get_running_cost(gate)
if self.running_cost is not None:
self.running_cost = [(x + y) for x, y in zip(self.
running_cost, cost)]
else:
self.running_cost = cost
def clear_running_cost(self):
self.running_cost = None
def forward(self, data_input, gate_input, masked_func=None):
gate = self.gate_activate(self.gate_conv(gate_input))
self.update_running_cost(gate)
if masked_func is not None:
data_input = masked_func(data_input, gate)
data, gate = self.encode(data_input, gate)
output, = self.decode(data * gate)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
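# Hedged example of the 'GeReTanH' variant (added; tau=1.0 is an illustrative
# choice): the shifted gate ((tanh(x - tau) + tanh(tau)) / (1 + tanh(tau)))
# clamped at 0 is exactly 0 for x <= 0, approaches 1 as x grows, and reduces
# to ReTanH at tau = 0.
def _demo_geretanh():
    sg = Model(in_channels=4, gate_activation='GeReTanH',
               gate_activation_kargs={'tau': 1.0})
    return sg(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)).shape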
|
ClassificationCircleLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fn/cfnbjifotifsbkjqwcrjvmwysi2u65kyzrt5izbv7n7pgfrnvakl.py
# Topologically Sorted Source Nodes: [mask, sub, positive_weighting, mul, sub_2, mul_1, sub_1, add, negative_weighting, mul_2, sub_3, sub_4, mul_3, logits, loss], Original ATen: [aten.scatter, aten.rsub, aten.clamp, aten.mul, aten.sub, aten.add, aten.where, aten._log_softmax]
# Source node to ATen node mapping:
# add => add
# logits => where
# loss => amax, exp, sub_5, sum_1
# mask => scatter_upon_const_tensor
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# negative_weighting => clamp_min_1
# positive_weighting => clamp_min
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# Graph fragment:
# %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: False, dtype: torch.bool, dim: 1, selector: %unsqueeze, val: 1})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.25, %arg0_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, 256.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.75), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 0.25), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min_1, 256.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, 0.25), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %sub_4), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%scatter_upon_const_tensor, %mul_1, %mul_3), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [1], True), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_5,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0 = async_compile.triton('triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp6 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp67 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], True, tl.int1)
tmp4 = tl.full([1], False, tl.int1)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp6.to(tl.float32)
tmp8 = 1.25
tmp9 = tmp8 - tmp7
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = 256.0
tmp13 = tmp11 * tmp12
tmp14 = 0.75
tmp15 = tmp7 - tmp14
tmp16 = tmp13 * tmp15
tmp17 = tmp6 - tmp1
tmp18 = tmp17.to(tl.float32)
tmp19 = 0.25
tmp20 = tmp18 + tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp10)
tmp22 = tmp21 * tmp12
tmp23 = tmp18 - tmp19
tmp24 = tmp22 * tmp23
tmp25 = tl.where(tmp5, tmp16, tmp24)
tmp26 = tl.full([1], 1, tl.int64)
tmp27 = tmp0 == tmp26
tmp28 = tl.where(tmp27, tmp3, tmp4)
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp8 - tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp10)
tmp33 = tmp32 * tmp12
tmp34 = tmp30 - tmp14
tmp35 = tmp33 * tmp34
tmp36 = tmp29 - tmp1
tmp37 = tmp36.to(tl.float32)
tmp38 = tmp37 + tmp19
tmp39 = triton_helpers.maximum(tmp38, tmp10)
tmp40 = tmp39 * tmp12
tmp41 = tmp37 - tmp19
tmp42 = tmp40 * tmp41
tmp43 = tl.where(tmp28, tmp35, tmp42)
tmp44 = triton_helpers.maximum(tmp25, tmp43)
tmp45 = tl.full([1], 2, tl.int64)
tmp46 = tmp0 == tmp45
tmp47 = tl.where(tmp46, tmp3, tmp4)
tmp49 = tmp48.to(tl.float32)
tmp50 = tmp8 - tmp49
tmp51 = triton_helpers.maximum(tmp50, tmp10)
tmp52 = tmp51 * tmp12
tmp53 = tmp49 - tmp14
tmp54 = tmp52 * tmp53
tmp55 = tmp48 - tmp1
tmp56 = tmp55.to(tl.float32)
tmp57 = tmp56 + tmp19
tmp58 = triton_helpers.maximum(tmp57, tmp10)
tmp59 = tmp58 * tmp12
tmp60 = tmp56 - tmp19
tmp61 = tmp59 * tmp60
tmp62 = tl.where(tmp47, tmp54, tmp61)
tmp63 = triton_helpers.maximum(tmp44, tmp62)
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp0 == tmp64
tmp66 = tl.where(tmp65, tmp3, tmp4)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp8 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp10)
tmp71 = tmp70 * tmp12
tmp72 = tmp68 - tmp14
tmp73 = tmp71 * tmp72
tmp74 = tmp67 - tmp1
tmp75 = tmp74.to(tl.float32)
tmp76 = tmp75 + tmp19
tmp77 = triton_helpers.maximum(tmp76, tmp10)
tmp78 = tmp77 * tmp12
tmp79 = tmp75 - tmp19
tmp80 = tmp78 * tmp79
tmp81 = tl.where(tmp66, tmp73, tmp80)
tmp82 = triton_helpers.maximum(tmp63, tmp81)
tmp83 = tmp25 - tmp82
tmp84 = tl_math.exp(tmp83)
tmp85 = tmp43 - tmp82
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp84 + tmp86
tmp88 = tmp62 - tmp82
tmp89 = tl_math.exp(tmp88)
tmp90 = tmp87 + tmp89
tmp91 = tmp81 - tmp82
tmp92 = tl_math.exp(tmp91)
tmp93 = tmp90 + tmp92
tl.store(out_ptr0 + (x0), tmp82, xmask)
tl.store(out_ptr1 + (x0), tmp93, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/f7/cf76gdye3s6mqx3jjsxgcjijmfytmkiiff5dl6dyksihxbvh3muu.py
# Topologically Sorted Source Nodes: [mask, sub, positive_weighting, mul, sub_2, mul_1, sub_1, add, negative_weighting, mul_2, sub_3, sub_4, mul_3, logits, loss], Original ATen: [aten.scatter, aten.rsub, aten.clamp, aten.mul, aten.sub, aten.add, aten.where, aten._log_softmax]
# Source node to ATen node mapping:
# add => add
# logits => where
# loss => log, sub_5, sub_6
# mask => scatter_upon_const_tensor
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# negative_weighting => clamp_min_1
# positive_weighting => clamp_min
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# Graph fragment:
# %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: False, dtype: torch.bool, dim: 1, selector: %unsqueeze, val: 1})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.25, %arg0_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, 256.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.75), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 0.25), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min_1, 256.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, 0.25), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %sub_4), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%scatter_upon_const_tensor, %mul_1, %mul_3), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %log), kwargs = {})
triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1 = async_compile.triton('triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (x2), xmask)
tmp27 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], True, tl.int1)
tmp4 = tl.full([1], False, tl.int1)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp6.to(tl.float32)
tmp8 = 1.25
tmp9 = tmp8 - tmp7
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = 256.0
tmp13 = tmp11 * tmp12
tmp14 = 0.75
tmp15 = tmp7 - tmp14
tmp16 = tmp13 * tmp15
tmp17 = tl.full([1], 0, tl.int64)
tmp18 = tmp6 - tmp17
tmp19 = tmp18.to(tl.float32)
tmp20 = 0.25
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp21, tmp10)
tmp23 = tmp22 * tmp12
tmp24 = tmp19 - tmp20
tmp25 = tmp23 * tmp24
tmp26 = tl.where(tmp5, tmp16, tmp25)
tmp28 = tmp26 - tmp27
tmp30 = tl_math.log(tmp29)
tmp31 = tmp28 - tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bz/cbzq7t26pihe3ec3rnmx4wscu3agefng6kzj2qs5lh462cc5t6ij.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.nll_loss_forward]
# Source node to ATen node mapping:
# loss => convert_element_type, div, full_default_2, ne_1, ne_2, neg, sum_2, sum_3, where_2
# Graph fragment:
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, -100), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_2,), kwargs = {})
# %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, -100), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %convert_element_type), kwargs = {})
triton_per_fused_nll_loss_forward_2 = async_compile.triton('triton_per_fused_nll_loss_forward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_nll_loss_forward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_nll_loss_forward_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([1, 1], -100, tl.int64)
tmp2 = tmp0 != tmp1
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tmp5 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp6 = tmp4 + tmp5
tmp7 = tmp4 < 0
tmp8 = tl.where(tmp7, tmp6, tmp4)
tl.device_assert((0 <= tmp8) & (tmp8 < 4), "index out of bounds: 0 <= tmp8 < 4")
tmp10 = tl.load(in_ptr1 + (tmp8 + (4*r0)), None, eviction_policy='evict_last')
tmp11 = -tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp2, tmp11, tmp12)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp2.to(tl.int64)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = tmp20.to(tl.float32)
tmp22 = tmp16 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [mask, sub, positive_weighting, mul, sub_2, mul_1, sub_1, add, negative_weighting, mul_2, sub_3, sub_4, mul_3, logits, loss], Original ATen: [aten.scatter, aten.rsub, aten.clamp, aten.mul, aten.sub, aten.add, aten.where, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0.run(arg1_1, arg0_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mask, sub, positive_weighting, mul, sub_2, mul_1, sub_1, add, negative_weighting, mul_2, sub_3, sub_4, mul_3, logits, loss], Original ATen: [aten.scatter, aten.rsub, aten.clamp, aten.mul, aten.sub, aten.add, aten.where, aten._log_softmax]
triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1.run(arg1_1, arg0_1, buf0, buf1, buf2, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.nll_loss_forward]
triton_per_fused_nll_loss_forward_2.run(buf5, arg1_1, buf2, 1, 4, grid=grid(1), stream=stream0)
del arg1_1
del buf2
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
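# Hedged reading of the three kernels above (comment added for clarity):
# kernels 0 and 1 fold the circle re-weighting (scatter mask, clamp, scale)
# into a numerically stable log-softmax (per-row amax and sum-exp, then the
# normalized values), and kernel 2 gathers the mean NLL with the standard
# ignore_index=-100 masking -- together the eager F.cross_entropy over the
# re-scaled logits.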
| import torch
import torch.nn as nn
import torch.utils.data
from typing import Tuple
from torch.nn.functional import cross_entropy
from itertools import product as product
from math import sqrt as sqrt
class ClassificationCircleLoss(nn.Module):
"""Circle loss for class-level labels as described in the paper
`"Circle Loss: A Unified Perspective of Pair Similarity Optimization" <#>`_
Args:
scale (float): the scale factor. Default: 256.0
margin (float): the relax margin value. Default: 0.25
circle_center (tuple[float]): the center of the circle (logit_ap, logit_an). Default: (1, 0)
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
"""
def __init__(self, scale: 'float'=256.0, margin: 'float'=0.25,
circle_center: 'Tuple[float, float]'=(1, 0), reduction: 'str'='mean'
) ->None:
super(ClassificationCircleLoss, self).__init__()
self.scale = scale
self.margin = margin
self.circle_center = circle_center
self.reduction = reduction
def forward(self, logits: 'torch.Tensor', targets: 'torch.LongTensor'
) ->torch.Tensor:
"""
Args:
logits (torch.Tensor): The predicted logits before softmax,
namely :math:`\\cos \\theta` in the above equation, with shape of :math:`(N, C)`
targets (torch.LongTensor): The ground-truth label long vector,
namely :math:`y` in the above equation, with shape of :math:`(N,)`
Returns:
            torch.Tensor: the computed loss
"""
mask = torch.zeros(logits.shape, dtype=torch.bool, device=logits.device
).scatter_(dim=1, index=targets.unsqueeze(1), value=1)
positive_weighting = torch.clamp(self.circle_center[0] + self.
margin - logits.detach(), min=0)
negative_weighting = torch.clamp(logits.detach() - self.
circle_center[1] + self.margin, min=0)
logits = torch.where(mask, self.scale * positive_weighting * (
logits - (self.circle_center[0] - self.margin)), self.scale *
negative_weighting * (logits - self.circle_center[1] - self.margin)
)
loss = cross_entropy(input=logits, target=targets, reduction=self.
reduction)
return loss
def get_inputs():
return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype=
torch.int64)]
def get_init_inputs():
return [[], {}]
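# Hedged worked example (added; the float logits below are illustrative, unlike
# the integer tensors in get_inputs). With the defaults scale=256, margin=0.25,
# a target-class logit s_p is re-scaled to 256 * clamp(1.25 - s_p, 0) * (s_p - 0.75)
# and a non-target logit s_n to 256 * clamp(s_n + 0.25, 0) * (s_n - 0.25), e.g.
# s_p = 0.9 -> 256 * 0.35 * 0.15 = 13.44 and s_n = 0.1 -> 256 * 0.35 * -0.15 = -13.44.
def _demo_circle_loss():
    crit = ClassificationCircleLoss()
    logits = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
    targets = torch.tensor([0, 1])
    return crit(logits, targets)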
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
from typing import Tuple
from itertools import product as product
from math import sqrt as sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0(
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp48 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp67 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], True, tl.int1)
tmp4 = tl.full([1], False, tl.int1)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp6.to(tl.float32)
tmp8 = 1.25
tmp9 = tmp8 - tmp7
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = 256.0
tmp13 = tmp11 * tmp12
tmp14 = 0.75
tmp15 = tmp7 - tmp14
tmp16 = tmp13 * tmp15
tmp17 = tmp6 - tmp1
tmp18 = tmp17.to(tl.float32)
tmp19 = 0.25
tmp20 = tmp18 + tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp10)
tmp22 = tmp21 * tmp12
tmp23 = tmp18 - tmp19
tmp24 = tmp22 * tmp23
tmp25 = tl.where(tmp5, tmp16, tmp24)
tmp26 = tl.full([1], 1, tl.int64)
tmp27 = tmp0 == tmp26
tmp28 = tl.where(tmp27, tmp3, tmp4)
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp8 - tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp10)
tmp33 = tmp32 * tmp12
tmp34 = tmp30 - tmp14
tmp35 = tmp33 * tmp34
tmp36 = tmp29 - tmp1
tmp37 = tmp36.to(tl.float32)
tmp38 = tmp37 + tmp19
tmp39 = triton_helpers.maximum(tmp38, tmp10)
tmp40 = tmp39 * tmp12
tmp41 = tmp37 - tmp19
tmp42 = tmp40 * tmp41
tmp43 = tl.where(tmp28, tmp35, tmp42)
tmp44 = triton_helpers.maximum(tmp25, tmp43)
tmp45 = tl.full([1], 2, tl.int64)
tmp46 = tmp0 == tmp45
tmp47 = tl.where(tmp46, tmp3, tmp4)
tmp49 = tmp48.to(tl.float32)
tmp50 = tmp8 - tmp49
tmp51 = triton_helpers.maximum(tmp50, tmp10)
tmp52 = tmp51 * tmp12
tmp53 = tmp49 - tmp14
tmp54 = tmp52 * tmp53
tmp55 = tmp48 - tmp1
tmp56 = tmp55.to(tl.float32)
tmp57 = tmp56 + tmp19
tmp58 = triton_helpers.maximum(tmp57, tmp10)
tmp59 = tmp58 * tmp12
tmp60 = tmp56 - tmp19
tmp61 = tmp59 * tmp60
tmp62 = tl.where(tmp47, tmp54, tmp61)
tmp63 = triton_helpers.maximum(tmp44, tmp62)
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp0 == tmp64
tmp66 = tl.where(tmp65, tmp3, tmp4)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp8 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp10)
tmp71 = tmp70 * tmp12
tmp72 = tmp68 - tmp14
tmp73 = tmp71 * tmp72
tmp74 = tmp67 - tmp1
tmp75 = tmp74.to(tl.float32)
tmp76 = tmp75 + tmp19
tmp77 = triton_helpers.maximum(tmp76, tmp10)
tmp78 = tmp77 * tmp12
tmp79 = tmp75 - tmp19
tmp80 = tmp78 * tmp79
tmp81 = tl.where(tmp66, tmp73, tmp80)
tmp82 = triton_helpers.maximum(tmp63, tmp81)
tmp83 = tmp25 - tmp82
tmp84 = tl_math.exp(tmp83)
tmp85 = tmp43 - tmp82
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp84 + tmp86
tmp88 = tmp62 - tmp82
tmp89 = tl_math.exp(tmp88)
tmp90 = tmp87 + tmp89
tmp91 = tmp81 - tmp82
tmp92 = tl_math.exp(tmp91)
tmp93 = tmp90 + tmp92
tl.store(out_ptr0 + x0, tmp82, xmask)
tl.store(out_ptr1 + x0, tmp93, xmask)
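# Note (commentary added for illustration, not emitted by the compiler): the
# kernel above fuses the circle-loss re-weighting of the logits (scale 256.0,
# margin 0.25, hence the constants 1.25, 0.75 and 0.25) with the reduction
# half of a numerically stable log-softmax: for each row it writes the
# row-wise maximum to out_ptr0 and the row-wise sum of exp(x - max) to
# out_ptr1, which the next kernel consumes.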
@triton.jit
def triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + x2, xmask)
tmp27 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], True, tl.int1)
tmp4 = tl.full([1], False, tl.int1)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp6.to(tl.float32)
tmp8 = 1.25
tmp9 = tmp8 - tmp7
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = 256.0
tmp13 = tmp11 * tmp12
tmp14 = 0.75
tmp15 = tmp7 - tmp14
tmp16 = tmp13 * tmp15
tmp17 = tl.full([1], 0, tl.int64)
tmp18 = tmp6 - tmp17
tmp19 = tmp18.to(tl.float32)
tmp20 = 0.25
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp21, tmp10)
tmp23 = tmp22 * tmp12
tmp24 = tmp19 - tmp20
tmp25 = tmp23 * tmp24
tmp26 = tl.where(tmp5, tmp16, tmp25)
tmp28 = tmp26 - tmp27
tmp30 = tl_math.log(tmp29)
tmp31 = tmp28 - tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_per_fused_nll_loss_forward_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1, 1], -100, tl.int64)
tmp2 = tmp0 != tmp1
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tmp5 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp6 = tmp4 + tmp5
tmp7 = tmp4 < 0
tmp8 = tl.where(tmp7, tmp6, tmp4)
tl.device_assert((0 <= tmp8) & (tmp8 < 4),
'index out of bounds: 0 <= tmp8 < 4')
tmp10 = tl.load(in_ptr1 + (tmp8 + 4 * r0), None, eviction_policy=
'evict_last')
tmp11 = -tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp2, tmp11, tmp12)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp2.to(tl.int64)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = tmp20.to(tl.float32)
tmp22 = tmp16 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
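# Note (commentary added for illustration): this kernel is the fused nll_loss
# forward. Targets equal to the default ignore_index (-100) are masked out,
# the selected log-probabilities are gathered and negated, and their sum is
# divided by the number of non-ignored targets -- i.e. the 'mean' reduction
# used by cross_entropy.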
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0[
grid(4)](arg1_1, arg0_1, buf0, buf1, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1[
grid(16)](arg1_1, arg0_1, buf0, buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf3
del buf3
triton_per_fused_nll_loss_forward_2[grid(1)](buf5, arg1_1, buf2, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf2
return buf5,
class ClassificationCircleLossNew(nn.Module):
"""Circle loss for class-level labels as described in the paper
`"Circle Loss: A Unified Perspective of Pair Similarity Optimization" <#>`_
Args:
scale (float): the scale factor. Default: 256.0
margin (float): the relax margin value. Default: 0.25
circle_center (tuple[float]): the center of the circle (logit_ap, logit_an). Default: (1, 0)
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
"""
def __init__(self, scale: 'float'=256.0, margin: 'float'=0.25,
circle_center: 'Tuple[float, float]'=(1, 0), reduction: 'str'='mean'
) ->None:
super(ClassificationCircleLossNew, self).__init__()
self.scale = scale
self.margin = margin
self.circle_center = circle_center
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| lingtengqiu/LearnableTreeFilterV2 | ClassificationCircleLoss | false | 7,096 | [
"Apache-2.0"
] | 1 | 3814a5a84c0a5c33d6538749eaf5aed4827366de | https://github.com/lingtengqiu/LearnableTreeFilterV2/tree/3814a5a84c0a5c33d6538749eaf5aed4827366de | import torch
import torch.nn as nn
import torch.utils.data
from typing import Tuple
from torch.nn.functional import cross_entropy
from itertools import product as product
from math import sqrt as sqrt
class Model(nn.Module):
"""Circle loss for class-level labels as described in the paper
`"Circle Loss: A Unified Perspective of Pair Similarity Optimization" <#>`_
Args:
scale (float): the scale factor. Default: 256.0
margin (float): the relax margin value. Default: 0.25
circle_center (tuple[float]): the center of the circle (logit_ap, logit_an). Default: (1, 0)
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
"""
def __init__(self, scale: 'float'=256.0, margin: 'float'=0.25,
circle_center: 'Tuple[float, float]'=(1, 0), reduction: 'str'='mean'
) ->None:
super().__init__()
self.scale = scale
self.margin = margin
self.circle_center = circle_center
self.reduction = reduction
def forward(self, logits: 'torch.Tensor', targets: 'torch.LongTensor'
) ->torch.Tensor:
"""
Args:
logits (torch.Tensor): The predicted logits before softmax,
namely :math:`\\cos \\theta` in the above equation, with shape of :math:`(N, C)`
targets (torch.LongTensor): The ground-truth label long vector,
namely :math:`y` in the above equation, with shape of :math:`(N,)`
Returns:
torch.Tensor: loss
the computed loss
"""
mask = torch.zeros(logits.shape, dtype=torch.bool, device=logits.device
).scatter_(dim=1, index=targets.unsqueeze(1), value=1)
positive_weighting = torch.clamp(self.circle_center[0] + self.
margin - logits.detach(), min=0)
negative_weighting = torch.clamp(logits.detach() - self.
circle_center[1] + self.margin, min=0)
logits = torch.where(mask, self.scale * positive_weighting * (
logits - (self.circle_center[0] - self.margin)), self.scale *
negative_weighting * (logits - self.circle_center[1] - self.margin)
)
loss = cross_entropy(input=logits, target=targets, reduction=self.
reduction)
return loss
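# A minimal usage sketch (added for illustration; the sizes below are
# hypothetical and not part of the original file):
def _example_circle_loss_usage():
    loss_fn = Model(scale=256.0, margin=0.25)
    logits = torch.randn(8, 10)              # (N, C) similarity logits
    targets = torch.randint(0, 10, (8,))     # (N,) class labels
    return loss_fn(logits, targets)          # scalar 'mean'-reduced loss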
def get_inputs():
return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype=
torch.int64)]
def get_init_inputs():
return []
|
MeanVarFC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xl/cxltji5v4pd435j44wezjn5dhexfkx4wrcadp35cffwfzsbaisad.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 4, 8), (128, 32, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 8), (128, 32, 8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MeanVarFC(nn.Module):
def __init__(self, input_shape):
super(MeanVarFC, self).__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, x):
x = x + self.param
return x
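# A minimal shape sketch (added for illustration): with input_shape=[4, 4] the
# stored parameter has shape (1, 8) and broadcasts over the trailing dimension
# of the input:
def _example_meanvarfc_shapes():
    m = MeanVarFC([4, 4])            # shape [4, 4] -> param of shape (1, 8)
    x = torch.rand(4, 4, 4, 8)
    assert m(x).shape == x.shape     # (1, 8) broadcasts against (..., 8)
    return m(x)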
def get_inputs():
return [torch.rand([4, 4, 4, 8])]
def get_init_inputs():
return [[], {'input_shape': [4, 4]}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 4, 8), (128, 32, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(512)](primals_2, primals_1, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class MeanVarFCNew(nn.Module):
def __init__(self, input_shape):
super(MeanVarFCNew, self).__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, input_0):
primals_1 = self.param
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| lingzenan/invertible-resnet | MeanVarFC | false | 7,097 | [
"MIT"
] | 1 | 57b1c0de51a885aed074b77628f3b0c85c548e70 | https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_shape):
super().__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, x):
x = x + self.param
return x
def get_inputs():
return [torch.rand([4, 4, 4, 8])]
def get_init_inputs():
return []
|
SeparableConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
return (buf2, primals_1, primals_2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
import torch.nn.functional as F
from itertools import product as product
from math import sqrt as sqrt
class Conv2dSamePadding(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
                It assumes that the norm layer is used before the activation.
"""
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
self.padding_method = kwargs.pop('padding', None)
if self.padding_method is None:
if len(args) >= 5:
self.padding_method = args[4]
else:
self.padding_method = 0
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
super().__init__(*args, **kwargs, padding=0)
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
if isinstance(self.dilation, int):
self.dilation = [self.dilation] * 2
elif len(self.dilation) == 1:
self.dilation = [self.dilation[0]] * 2
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
else:
super().__init__(*args, **kwargs, padding=self.padding_method)
self.norm = norm
self.activation = activation
def forward(self, x):
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
input_h, input_w = x.shape[-2:]
stride_h, stride_w = self.stride
kernel_size_h, kernel_size_w = self.kernel_size
dilation_h, dilation_w = self.dilation
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
padding_needed_h = max(0, (output_h - 1) * stride_h + (
kernel_size_h - 1) * dilation_h + 1 - input_h)
padding_needed_w = max(0, (output_w - 1) * stride_w + (
kernel_size_w - 1) * dilation_w + 1 - input_w)
left = padding_needed_w // 2
right = padding_needed_w - left
top = padding_needed_h // 2
bottom = padding_needed_h - top
x = F.pad(x, [left, right, top, bottom])
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
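# A worked example of the "SAME" padding arithmetic above (added for
# illustration; the numbers are hypothetical): with input_h = 7, stride_h = 2,
# kernel_size_h = 3 and dilation_h = 1:
#   output_h         = ceil(7 / 2) = 4
#   padding_needed_h = max(0, (4 - 1) * 2 + (3 - 1) * 1 + 1 - 7) = 2
# so top = 1 and bottom = 1, and the convolution yields exactly
# ceil(input_h / stride_h) = 4 output rows.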
class SeparableConvBlock(torch.nn.Module):
"""
    Depthwise separable convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True, norm=None, activation=None):
"""
Args:
in_channels (int): the number of input tensor channels.
            out_channels (int): the number of output tensor channels.
            kernel_size (int): the kernel size.
            stride (int or tuple or list): the stride.
            bias (bool): if `True`, the pointwise conv applies bias.
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function
                It assumes that the norm layer is used before the activation.
"""
super(SeparableConvBlock, self).__init__()
self.norm = norm
self.activation = activation
self.depthwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=in_channels, kernel_size=kernel_size, stride=
stride, padding=padding, dilation=dilation, groups=in_channels,
bias=False)
self.pointwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=out_channels, kernel_size=1, stride=1, padding=0,
dilation=1, groups=1, bias=bias)
if bias:
self.bias = self.pointwise.bias
def forward(self, inputs):
x = self.depthwise(inputs)
x = self.pointwise(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
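# A minimal cost sketch (added for illustration; the sizes are hypothetical):
# a depthwise-separable block with C_in = C_out = 64 and a 3x3 kernel uses
# 64*3*3 + 64*64*1*1 = 4672 weight parameters (ignoring bias), versus
# 64*64*3*3 = 36864 for a dense 3x3 convolution.
def _example_separable_block():
    block = SeparableConvBlock(in_channels=64, out_channels=64,
        kernel_size=3, padding=1)
    x = torch.rand(1, 64, 8, 8)
    return block(x)  # -> (1, 64, 8, 8)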
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data
import torch.nn.functional as F
from itertools import product as product
from math import sqrt as sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf2, primals_4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_4
return buf2, primals_1, primals_2, primals_3, buf0
class Conv2dSamePadding(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
                It assumes that the norm layer is used before the activation.
"""
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
self.padding_method = kwargs.pop('padding', None)
if self.padding_method is None:
if len(args) >= 5:
self.padding_method = args[4]
else:
self.padding_method = 0
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
super().__init__(*args, **kwargs, padding=0)
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
if isinstance(self.dilation, int):
self.dilation = [self.dilation] * 2
elif len(self.dilation) == 1:
self.dilation = [self.dilation[0]] * 2
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
else:
super().__init__(*args, **kwargs, padding=self.padding_method)
self.norm = norm
self.activation = activation
def forward(self, x):
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
input_h, input_w = x.shape[-2:]
stride_h, stride_w = self.stride
kernel_size_h, kernel_size_w = self.kernel_size
dilation_h, dilation_w = self.dilation
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
padding_needed_h = max(0, (output_h - 1) * stride_h + (
kernel_size_h - 1) * dilation_h + 1 - input_h)
padding_needed_w = max(0, (output_w - 1) * stride_w + (
kernel_size_w - 1) * dilation_w + 1 - input_w)
left = padding_needed_w // 2
right = padding_needed_w - left
top = padding_needed_h // 2
bottom = padding_needed_h - top
x = F.pad(x, [left, right, top, bottom])
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
class SeparableConvBlockNew(torch.nn.Module):
"""
    Depthwise separable convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True, norm=None, activation=None):
"""
Args:
in_channels (int): the number of input tensor channels.
            out_channels (int): the number of output tensor channels.
            kernel_size (int): the kernel size.
            stride (int or tuple or list): the stride.
            bias (bool): if `True`, the pointwise conv applies bias.
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function
                It assumes that the norm layer is used before the activation.
"""
super(SeparableConvBlockNew, self).__init__()
self.norm = norm
self.activation = activation
self.depthwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=in_channels, kernel_size=kernel_size, stride=
stride, padding=padding, dilation=dilation, groups=in_channels,
bias=False)
self.pointwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=out_channels, kernel_size=1, stride=1, padding=0,
dilation=1, groups=1, bias=bias)
if bias:
self.bias = self.pointwise.bias
def forward(self, input_0):
primals_4 = self.bias
primals_1 = self.depthwise.weight
primals_3 = self.pointwise.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| lingtengqiu/LearnableTreeFilterV2 | SeparableConvBlock | false | 7,098 | [
"Apache-2.0"
] | 1 | 3814a5a84c0a5c33d6538749eaf5aed4827366de | https://github.com/lingtengqiu/LearnableTreeFilterV2/tree/3814a5a84c0a5c33d6538749eaf5aed4827366de | import math
import torch
import torch.utils.data
import torch.nn.functional as F
from itertools import product as product
from math import sqrt as sqrt
class Conv2dSamePadding(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
It assumes that norm layer is used before activation.
"""
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
self.padding_method = kwargs.pop('padding', None)
if self.padding_method is None:
if len(args) >= 5:
self.padding_method = args[4]
else:
self.padding_method = 0
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
super().__init__(*args, **kwargs, padding=0)
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
if isinstance(self.dilation, int):
self.dilation = [self.dilation] * 2
elif len(self.dilation) == 1:
self.dilation = [self.dilation[0]] * 2
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
else:
super().__init__(*args, **kwargs, padding=self.padding_method)
self.norm = norm
self.activation = activation
def forward(self, x):
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
input_h, input_w = x.shape[-2:]
stride_h, stride_w = self.stride
kernel_size_h, kernel_size_w = self.kernel_size
dilation_h, dilation_w = self.dilation
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
padding_needed_h = max(0, (output_h - 1) * stride_h + (
kernel_size_h - 1) * dilation_h + 1 - input_h)
padding_needed_w = max(0, (output_w - 1) * stride_w + (
kernel_size_w - 1) * dilation_w + 1 - input_w)
left = padding_needed_w // 2
right = padding_needed_w - left
top = padding_needed_h // 2
bottom = padding_needed_h - top
x = F.pad(x, [left, right, top, bottom])
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
class Model(torch.nn.Module):
"""
    Depthwise separable convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True, norm=None, activation=None):
"""
Args:
in_channels (int): the number of input tensor channels.
out_channels (int):the number of output tensor channels.
kernel_size (int): the kernel size.
stride (int or tuple or list): the stride.
bias (bool): if `True`, the pointwise conv applies bias.
apply_bn (bool): if `True`
# ... truncated (>4000 chars) for memory efficiency |
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wl/cwl3r3y2pgt376us4loda5kjeqzzwgynhcjamefpeshkd44ofpoz.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_5, 1.0), kwargs = {})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ym/cym2aoh2473azgl5qufcsfqa4qgp5bji7hptnotbmkegfc7h3xan.py
# Topologically Sorted Source Nodes: [contiguous_2, view_5], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
# contiguous_2 => clone
# view_5 => view_20
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %view_20 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [-1, 4]), kwargs = {})
triton_poi_fused_clone_view_3 = async_compile.triton('triton_poi_fused_clone_view_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_4, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf3, 64, grid=grid(64), stream=stream0)
buf4 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf8)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_2, view_5], Original ATen: [aten.clone, aten.view]
triton_poi_fused_clone_view_3.run(buf8, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 4), (4, 1), 0), out=buf10)
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), primals_1, buf7, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf9, (4, 16), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import numpy as np
from torch import nn
class MultiHeadAttention(nn.Module):
def __init__(self, n_heads, input_dim, embed_dim, val_dim=None, key_dim
=None):
super(MultiHeadAttention, self).__init__()
if val_dim is None:
val_dim = embed_dim // n_heads
if key_dim is None:
key_dim = val_dim
self.n_heads = n_heads
self.input_dim = input_dim
self.embed_dim = embed_dim
self.val_dim = val_dim
self.key_dim = key_dim
self.norm_factor = 1 / math.sqrt(key_dim)
self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))
self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1.0 / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, q, h=None, mask=None):
"""
:param q: queries (batch_size, n_query, input_dim)
:param h: data (batch_size, graph_size, input_dim)
        :param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. it can be 2-dim if n_query == 1)
        Mask should contain 1 where attention is not possible (i.e. the mask is a negative adjacency matrix)
:return:
"""
if h is None:
h = q
batch_size, graph_size, input_dim = h.size()
n_query = q.size(1)
assert q.size(0) == batch_size
assert q.size(2) == input_dim
assert input_dim == self.input_dim, 'Wrong embedding dimension of input'
hflat = h.contiguous().view(-1, input_dim)
qflat = q.contiguous().view(-1, input_dim)
shp = self.n_heads, batch_size, graph_size, -1
shp_q = self.n_heads, batch_size, n_query, -1
Q = torch.matmul(qflat, self.W_query).view(shp_q)
K = torch.matmul(hflat, self.W_key).view(shp)
V = torch.matmul(hflat, self.W_val).view(shp)
compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))
if mask is not None:
mask = mask.view(1, batch_size, n_query, graph_size).expand_as(
compatibility)
compatibility[mask] = -np.inf
attn = torch.softmax(compatibility, dim=-1)
if mask is not None:
attnc = attn.clone()
attnc[mask] = 0
attn = attnc
heads = torch.matmul(attn, V)
out = torch.mm(heads.permute(1, 2, 0, 3).contiguous().view(-1, self
.n_heads * self.val_dim), self.W_out.view(-1, self.embed_dim)
).view(batch_size, n_query, self.embed_dim)
return out
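# A minimal self-attention shape sketch (added for illustration; the sizes
# below are hypothetical):
def _example_mha_shapes():
    mha = MultiHeadAttention(n_heads=4, input_dim=16, embed_dim=16)
    q = torch.rand(2, 5, 16)            # (batch_size, n_query, input_dim)
    out = mha(q)                        # h defaults to q (self-attention)
    assert out.shape == (2, 5, 16)      # (batch_size, n_query, embed_dim)
    return out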
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_heads': 4, 'input_dim': 4, 'embed_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
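# Note (commentary added for illustration): the kernel above fuses the softmax
# normalization with the fully-masked-row guard: each row's exponentials are
# divided by their sum, and rows whose four scores are all -inf (every
# position masked) are overwritten with zeros instead of the NaNs a plain
# softmax would produce.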
@triton.jit
def triton_poi_fused_clone_view_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4,
1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4,
1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4,
1), 0), primals_4, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_0[grid(64)](buf3, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf4 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_0[grid(64)](buf4, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf8)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_clone_view_3[grid(16, 4)](buf8, buf9, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 4), (4, 1
), 0), out=buf10)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), primals_1, buf7, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf9, (4, 16), (1, 4), 0), reinterpret_tensor(
primals_5, (4, 4), (1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, n_heads, input_dim, embed_dim, val_dim=None, key_dim
=None):
super(MultiHeadAttentionNew, self).__init__()
if val_dim is None:
val_dim = embed_dim // n_heads
if key_dim is None:
key_dim = val_dim
self.n_heads = n_heads
self.input_dim = input_dim
self.embed_dim = embed_dim
self.val_dim = val_dim
self.key_dim = key_dim
self.norm_factor = 1 / math.sqrt(key_dim)
self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))
self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1.0 / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_2 = self.W_query
primals_3 = self.W_key
primals_4 = self.W_val
primals_5 = self.W_out
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
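# Hedged usage sketch (assumption: not part of the original row): the compiled
# wrapper above is a drop-in for the eager module in the no-mask, self-attention
# case, but its call() path allocates CUDA buffers, so it needs a GPU + Triton.
if __name__ == "__main__" and torch.cuda.is_available():
    m = MultiHeadAttentionNew(n_heads=4, input_dim=4, embed_dim=4).cuda()
    out = m(torch.rand(4, 4, 4, device="cuda"))
    print(out.shape)  # torch.Size([4, 4, 4])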
 | lin-bo/RL_back2depot_VRP | MultiHeadAttention | false | 7,099 | ["MIT"] | 1 | 2a159d1df221ff314d98d79b8fde2b739a454ff7 | https://github.com/lin-bo/RL_back2depot_VRP/tree/2a159d1df221ff314d98d79b8fde2b739a454ff7 | import math
import torch
import numpy as np
from torch import nn
class Model(nn.Module):
def __init__(self, n_heads, input_dim, embed_dim, val_dim=None, key_dim
=None):
super().__init__()
if val_dim is None:
val_dim = embed_dim // n_heads
if key_dim is None:
key_dim = val_dim
self.n_heads = n_heads
self.input_dim = input_dim
self.embed_dim = embed_dim
self.val_dim = val_dim
self.key_dim = key_dim
self.norm_factor = 1 / math.sqrt(key_dim)
self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))
self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1.0 / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, q, h=None, mask=None):
"""
:param q: queries (batch_size, n_query, input_dim)
:param h: data (batch_size, graph_size, input_dim)
        :param mask: mask (batch_size, n_query, graph_size), or any shape viewable as that (e.g. 2-dim when n_query == 1).
            Entries equal to 1 mark positions where attention is not possible (i.e. the mask is a negative adjacency)
        :return: output (batch_size, n_query, embed_dim)
"""
if h is None:
h = q
batch_size, graph_size, input_dim = h.size()
n_query = q.size(1)
assert q.size(0) == batch_size
assert q.size(2) == input_dim
assert input_dim == self.input_dim, 'Wrong embedding dimension of input'
hflat = h.contiguous().view(-1, input_dim)
qflat = q.contiguous().view(-1, input_dim)
shp = self.n_heads, batch_size, graph_size, -1
shp_q = self.n_heads, batch_size, n_query, -1
Q = torch.matmul(qflat, self.W_query).view(shp_q)
K = torch.matmul(hflat, self.W_key).view(shp)
V = torch.matmul(hflat, self.W_val).view(shp)
compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))
if mask is not None:
mask = mask.view(1, batch_size, n_query, graph_size).expand_as(
compatibility)
compatibility[mask] = -np.inf
attn = torch.softmax(compatibility, dim=-1)
if mask is not None:
attnc = attn.clone()
attnc[mask] = 0
attn = attnc
heads = torch.matmul(attn, V)
out = torch.mm(heads.permute(1, 2, 0, 3).contiguous().view(-1, self
.n_heads * self.val_dim), self.W_out.view(-1, self.embed_dim)
).view(batch_size, n_query, self.embed_dim)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
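# Hedged usage sketch (assumption: not part of the original row): it exercises
# the Model class above and the mask convention, where entries equal to 1/True
# mark query-node pairs that must NOT attend to each other.
if __name__ == "__main__":
    attn = Model(n_heads=4, input_dim=4, embed_dim=4)
    q = torch.rand(2, 3, 4)                      # (batch_size, n_query, input_dim)
    h = torch.rand(2, 5, 4)                      # (batch_size, graph_size, input_dim)
    mask = torch.zeros(2, 3, 5, dtype=torch.bool)
    mask[:, :, -1] = True                        # forbid attending to the last node
    out = attn(q, h, mask)
    print(out.shape)                             # torch.Size([2, 3, 4])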
|
EDMLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/by/cbyierpm3pbebhkkiv5bu7g55geq4bts3id4zzn3makdomwbxyp7.py
# Topologically Sorted Source Nodes: [cdf_estimate], Original ATen: [aten.cumsum]
# Source node to ATen node mapping:
# cdf_estimate => cumsum_1
# Graph fragment:
# %cumsum_1 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%arg1_1, 1), kwargs = {})
triton_per_fused_cumsum_0 = async_compile.triton('triton_per_fused_cumsum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cd/ccdhthsqtt37pvrei4f455rqurz65zebemf34bybticrr3pz33wb.py
# Topologically Sorted Source Nodes: [cdf_diff, abs_1, pow_1, mean, samplewise_emd, mean_1], Original ATen: [aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt]
# Source node to ATen node mapping:
# abs_1 => abs_1
# cdf_diff => sub
# mean => mean
# mean_1 => mean_1
# pow_1 => pow_1
# samplewise_emd => sqrt
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cumsum_1, %cumsum), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_abs_mean_pow_sqrt_sub_1 = async_compile.triton('triton_per_fused_abs_mean_pow_sqrt_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_pow_sqrt_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_pow_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = 1.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cdf_estimate], Original ATen: [aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused_cumsum_0.run(arg1_1, buf0, 64, 4, grid=grid(64), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cdf_target], Original ATen: [aten.cumsum]
triton_per_fused_cumsum_0.run(arg0_1, buf1, 64, 4, grid=grid(64), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cdf_diff, abs_1, pow_1, mean, samplewise_emd, mean_1], Original ATen: [aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt]
triton_per_fused_abs_mean_pow_sqrt_sub_1.run(buf3, buf0, buf1, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim
class EDMLoss(nn.Module):
def __init__(self):
super(EDMLoss, self).__init__()
def forward(self, p_target: 'torch.Tensor', p_estimate: 'torch.Tensor'):
assert p_target.shape == p_estimate.shape
cdf_target = torch.cumsum(p_target, dim=1)
cdf_estimate = torch.cumsum(p_estimate, dim=1)
cdf_diff = cdf_estimate - cdf_target
samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff
), 2)))
return samplewise_emd.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl
.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp3, xmask)
@triton.jit
def triton_per_fused_abs_mean_pow_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = 1.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_0[grid(64)](arg1_1, buf0, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_cumsum_0[grid(64)](arg0_1, buf1, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_abs_mean_pow_sqrt_sub_1[grid(1)](buf3, buf0, buf1,
1, 256, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class EDMLossNew(nn.Module):
def __init__(self):
super(EDMLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | lishiyu0088/Neural_Bradley-Terry | EDMLoss | false | 7,100 | ["MIT"] | 1 | ea2108267cf24c1fcfdf432e70810283d90495af | https://github.com/lishiyu0088/Neural_Bradley-Terry/tree/ea2108267cf24c1fcfdf432e70810283d90495af | import torch
import torch.nn as nn
import torch.optim
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, p_target: 'torch.Tensor', p_estimate: 'torch.Tensor'):
assert p_target.shape == p_estimate.shape
cdf_target = torch.cumsum(p_target, dim=1)
cdf_estimate = torch.cumsum(p_estimate, dim=1)
cdf_diff = cdf_estimate - cdf_target
samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff
), 2)))
return samplewise_emd.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
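# Hedged worked example (assumption: not part of the original row): with two
# identical distributions the cumulative histograms coincide and the loss is
# exactly zero; shifting probability mass across bins raises it.
if __name__ == "__main__":
    loss_fn = Model()
    p = torch.tensor([[0.1, 0.2, 0.3, 0.4]])
    assert loss_fn(p, p).item() == 0.0           # identical CDFs -> zero EMD
    q = torch.tensor([[0.4, 0.3, 0.2, 0.1]])
    print(loss_fn(p, q))                          # > 0: mass moved across bins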
|
ActNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nz/cnzkzycmku6msmsiehonl6lvkxkalhor6qvech326gpy2lqc3hoy.py
# Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# logdet => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%unsqueeze,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/w6/cw6fwvvd65omur5x74r5pig6wm7q3m5ho7q4pfmcjdleaujxnwkx.py
# Topologically Sorted Source Nodes: [exp, mul, add], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul => mul
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%unsqueeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(primals_1, buf0, 1, 4, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, mul, add], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_1.run(primals_2, primals_1, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class ActNorm(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def forward(self, x):
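        # Data-dependent initialization in the style of Glow's ActNorm: on the
        # first forward pass, pick scale and shift so that this batch comes out
        # with zero mean and unit variance per channel.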
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
mean = torch.transpose(x, 0, 1).contiguous().view(self.
num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum()
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](primals_1, buf0, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, primals_1,
primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf1, buf0, primals_1, primals_2
class ActNormNew(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNormNew, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def forward(self, input_0):
primals_1 = self._log_scale
primals_3 = self._shift
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
 | lingzenan/invertible-resnet | ActNorm | false | 7,101 | ["MIT"] | 1 | 57b1c0de51a885aed074b77628f3b0c85c548e70 | https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70 | import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
mean = torch.transpose(x, 0, 1).contiguous().view(self.
num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum()
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
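# Hedged round-trip sketch (assumption: not part of the original row): after the
# data-dependent first call, forward followed by inverse reconstructs the input,
# and the normalized activations have near-zero mean and unit variance per channel.
if __name__ == "__main__":
    torch.manual_seed(0)
    norm = Model(num_channels=4)
    x = torch.randn(8, 4) * 3.0 + 1.0
    y, logdet = norm(x)                           # first call initializes from x
    print(y.mean(0), y.std(0, unbiased=False))    # ~0 and ~1 per channel
    print(torch.allclose(norm.inverse(y), x, atol=1e-5))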
|
ActNorm2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mk/cmkfq3mg2sl66skmhudljmlrjckeersi3ye3frqm2bl7felzsn2n.py
# Topologically Sorted Source Nodes: [sum_1, mul, logdet], Original ATen: [aten.sum, aten.mul]
# Source node to ATen node mapping:
# logdet => mul_1
# mul => mul
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%unsqueeze_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 4), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ok/cok2tne7527mk7p2rdtta5caje65jdloa27vp4xewaco2m24lfgp.py
# Topologically Sorted Source Nodes: [exp, mul_2, add], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul_2 => mul_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%unsqueeze_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_5), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_1, mul, logdet], Original ATen: [aten.sum, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(buf2, primals_1, 1, 4, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, mul_2, add], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_1.run(primals_2, primals_1, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, buf2, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class ActNorm2D(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm2D, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :, None, None]
def shift(self):
return self._shift[None, :, None, None]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
mean = torch.transpose(x, 0, 1).contiguous().view(self.
num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :, None, None]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
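        # The affine map acts independently at each of the H * W spatial
        # positions, so log|det J| is the per-channel log-scale sum times H * W.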
logdet = log_scale.sum() * x.size(2) * x.size(3)
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(1)](buf2, primals_1, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, primals_1,
primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf1, buf2, primals_1, primals_2
class ActNorm2DNew(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm2DNew, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :, None, None]
def shift(self):
return self._shift[None, :, None, None]
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def forward(self, input_0):
primals_1 = self._log_scale
primals_3 = self._shift
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
 | lingzenan/invertible-resnet | ActNorm2D | false | 7,102 | ["MIT"] | 1 | 57b1c0de51a885aed074b77628f3b0c85c548e70 | https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70 | import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :, None, None]
def shift(self):
return self._shift[None, :, None, None]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
mean = torch.transpose(x, 0, 1).contiguous().view(self.
num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :, None, None]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum() * x.size(2) * x.size(3)
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
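# Hedged sketch (assumption: not part of the original row): the 2D variant
# normalizes each channel over N*H*W, and its logdet equals the per-channel
# log-scale sum multiplied by the spatial size H * W.
if __name__ == "__main__":
    torch.manual_seed(0)
    norm = Model(num_channels=4)
    x = torch.randn(2, 4, 4, 4) * 2.0 - 1.0
    y, logdet = norm(x)                           # first call initializes from the batch
    per_channel = y.transpose(0, 1).reshape(4, -1)
    print(per_channel.mean(1))                    # ~0 for every channel
    print(torch.allclose(logdet, norm.log_scale().sum() * 16))  # H * W = 4 * 4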
|
SigmoidFocalClassificationLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/af/caf66esntjl5pu47g5abaylnivixxlc2i43ygyzcmkfj4xuk7jrk.py
# Topologically Sorted Source Nodes: [prediction_probabilities, mul_1, sub_1, sub_2, mul_2, p_t, sub_3, modulating_factor, mul_3, sub_4, mul_4, alpha_weight_factor, mul_5, clamp, mul, loss, abs_1, neg, exp, log1p, loss_1, focal_cross_entropy_loss, mul_7], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add, aten.pow, aten.clamp, aten.sub, aten.abs, aten.neg, aten.exp, aten.log1p]
# Source node to ATen node mapping:
# abs_1 => abs_1
# alpha_weight_factor => add_2
# clamp => clamp_min
# exp => exp
# focal_cross_entropy_loss => mul_6
# log1p => log1p
# loss => sub
# loss_1 => add
# modulating_factor => pow_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_7 => mul_7
# neg => neg
# p_t => add_1
# prediction_probabilities => sigmoid
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sigmoid), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %sub_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %add_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, 0.75), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %add_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %mul), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %log1p), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %arg2_1), kwargs = {})
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp27 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
tmp6 = tmp4 - tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp9 = tmp4 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 0.25
tmp12 = tmp0 * tmp11
tmp13 = 0.75
tmp14 = tmp5 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp10 * tmp15
tmp17 = 0.0
tmp18 = triton_helpers.maximum(tmp1, tmp17)
tmp19 = tmp1 * tmp0
tmp20 = tmp18 - tmp19
tmp21 = tl_math.abs(tmp1)
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = libdevice.log1p(tmp23)
tmp25 = tmp20 + tmp24
tmp26 = tmp16 * tmp25
tmp28 = tmp26 * tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prediction_probabilities, mul_1, sub_1, sub_2, mul_2, p_t, sub_3, modulating_factor, mul_3, sub_4, mul_4, alpha_weight_factor, mul_5, clamp, mul, loss, abs_1, neg, exp, log1p, loss_1, focal_cross_entropy_loss, mul_7], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add, aten.pow, aten.clamp, aten.sub, aten.abs, aten.neg, aten.exp, aten.log1p]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def _sigmoid_cross_entropy_with_logits(logits, labels):
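    # Numerically stable BCE-with-logits: max(x, 0) - x * z + log(1 + exp(-|x|)),
    # the standard rewrite of -[z*log(sigmoid(x)) + (1 - z)*log(1 - sigmoid(x))].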
loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits)
loss += torch.log1p(torch.exp(-torch.abs(logits)))
return loss
class SigmoidFocalClassificationLoss(nn.Module):
"""Sigmoid focal cross entropy loss.
    Focal loss down-weights well-classified examples and focuses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
super().__init__()
self._alpha = alpha
self._gamma = gamma
def forward(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
per_entry_cross_ent = _sigmoid_cross_entropy_with_logits(labels=
target_tensor, logits=prediction_tensor)
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = target_tensor * prediction_probabilities + (1 - target_tensor
) * (1 - prediction_probabilities)
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = target_tensor * self._alpha + (1 -
target_tensor) * (1 - self._alpha)
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
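# Minimal smoke test, not part of the original module; the shapes below are
# illustrative assumptions following the docstring's [batch_size,
# num_anchors, num_classes] convention.
if __name__ == "__main__":
    loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
    logits = torch.randn(2, 8, 3)
    targets = (torch.rand(2, 8, 3) > 0.5).float()
    weights = torch.ones(2, 8, 3)
    print(loss_fn(logits, targets, weights).shape)  # torch.Size([2, 8, 3])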
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0(
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp27 = tl.load(in_ptr2 + x0, xmask)
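    # Same wiring as the launch in call(): in_ptr0 is the target tensor,
    # in_ptr1 the logits, in_ptr2 the elementwise weights; the fused body
    # reproduces modulating_factor * alpha_weight * stable_sigmoid_ce * weights.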
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
tmp6 = tmp4 - tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp9 = tmp4 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 0.25
tmp12 = tmp0 * tmp11
tmp13 = 0.75
tmp14 = tmp5 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp10 * tmp15
tmp17 = 0.0
tmp18 = triton_helpers.maximum(tmp1, tmp17)
tmp19 = tmp1 * tmp0
tmp20 = tmp18 - tmp19
tmp21 = tl_math.abs(tmp1)
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = libdevice.log1p(tmp23)
tmp25 = tmp20 + tmp24
tmp26 = tmp16 * tmp25
tmp28 = tmp26 * tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0[
grid(256)](arg1_1, arg0_1, arg2_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
def _sigmoid_cross_entropy_with_logits(logits, labels):
loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits)
loss += torch.log1p(torch.exp(-torch.abs(logits)))
return loss
class SigmoidFocalClassificationLossNew(nn.Module):
"""Sigmoid focal cross entropy loss.
    Focal loss down-weights well-classified examples and focuses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
super().__init__()
self._alpha = alpha
self._gamma = gamma
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
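# Hypothetical smoke test, not from the original repo; it needs a CUDA
# device, and the (4, 4, 4, 4) contiguous inputs mirror the shape/stride
# asserts in call() above.
if __name__ == "__main__":
    m = SigmoidFocalClassificationLossNew()
    xs = [torch.rand(4, 4, 4, 4, device="cuda") for _ in range(3)]
    print(m(*xs).shape)  # torch.Size([4, 4, 4, 4])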
| liuhuaijjin/rpn_rois_proposals_layers | SigmoidFocalClassificationLoss | false | 7,103 | [
"MIT"
] | 1 | c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | https://github.com/liuhuaijjin/rpn_rois_proposals_layers/tree/c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | import torch
import torch.nn as nn
def _sigmoid_cross_entropy_with_logits(logits, labels):
loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits)
loss += torch.log1p(torch.exp(-torch.abs(logits)))
return loss
class Model(nn.Module):
"""Sigmoid focal cross entropy loss.
    Focal loss down-weights well-classified examples and focuses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
super().__init__()
self._alpha = alpha
self._gamma = gamma
def forward(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
per_entry_cross_ent = _sigmoid_cross_entropy_with_logits(labels=
target_tensor, logits=prediction_tensor)
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = target_tensor * prediction_probabilities + (1 - target_tensor
) * (1 - prediction_probabilities)
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = target_tensor * self._alpha + (1 -
target_tensor) * (1 - self._alpha)
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
GAT | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rn/crnvqrppxb3ocltksxzamskmx3mpvscqlqbanvhmgkmj5b53kfuf.py
# Topologically Sorted Source Nodes: [add, e], Original ATen: [aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# add => add
# e => gt
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %permute), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
triton_poi_fused_add_leaky_relu_0 = async_compile.triton('triton_poi_fused_add_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
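    # Broadcast add of f_1 (one value per row) against f_2^T (one value per
    # column); only the boolean mask (sum > 0) is stored, and the later fused
    # kernels use it to choose between x and alpha*x for the LeakyReLU.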
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k2/ck2qvrsga6qzh3n4zrcqhgn3gcw55gg5hx55rqebdhhoptcty66e.py
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
# Source node to ATen node mapping:
# gt => gt_1
# Graph fragment:
# %gt_1 : [num_users=5] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_5, 0), kwargs = {})
triton_poi_fused_gt_1 = async_compile.triton('triton_poi_fused_gt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7p/c7pzocdj4z66742c4x73l2mmqga3yo74menkparuudjxrge7crrh.py
# Topologically Sorted Source Nodes: [add, e, zero_vec, attention, attention_1, add_1, e_1, attention_3, attention_4, add_2, e_2, attention_6, attention_7, add_3, e_3, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# attention => where_1
# attention_1 => amax
# attention_10 => amax_3
# attention_3 => where_4
# attention_4 => amax_1
# attention_6 => where_7
# attention_7 => amax_2
# attention_9 => where_10
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %permute), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {})
# %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [1], True), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_5, %permute_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %add_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [1], True), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_9, %permute_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %add_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_7, [1], True), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_13, %permute_3), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_10, [1], True), kwargs = {})
triton_poi_fused__softmax_add_leaky_relu_mul_where_2 = async_compile.triton('triton_poi_fused__softmax_add_leaky_relu_mul_where_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: '*fp32', 10: '*i1', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_leaky_relu_mul_where_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 40, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
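    # Row-wise max for all four attention heads at once: each output element
    # covers one row, rebuilding the masked LeakyReLU scores for its four
    # columns and reducing with maximum() so softmax can subtract the row max.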
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tl.load(in_ptr3 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr3 + (1))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp22 = tl.load(in_ptr3 + (2))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp29 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp31 = tl.load(in_ptr3 + (3))
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp38 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr5 + (x0), xmask)
tmp40 = tl.load(in_ptr6 + (0))
tmp41 = tl.broadcast_to(tmp40, [XBLOCK])
tmp46 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp47 = tl.load(in_ptr6 + (1))
tmp48 = tl.broadcast_to(tmp47, [XBLOCK])
tmp54 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp55 = tl.load(in_ptr6 + (2))
tmp56 = tl.broadcast_to(tmp55, [XBLOCK])
tmp62 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp63 = tl.load(in_ptr6 + (3))
tmp64 = tl.broadcast_to(tmp63, [XBLOCK])
tmp70 = tl.load(in_ptr7 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp71 = tl.load(in_ptr8 + (x0), xmask)
tmp72 = tl.load(in_ptr9 + (0))
tmp73 = tl.broadcast_to(tmp72, [XBLOCK])
tmp78 = tl.load(in_ptr7 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp79 = tl.load(in_ptr9 + (1))
tmp80 = tl.broadcast_to(tmp79, [XBLOCK])
tmp86 = tl.load(in_ptr7 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp87 = tl.load(in_ptr9 + (2))
tmp88 = tl.broadcast_to(tmp87, [XBLOCK])
tmp94 = tl.load(in_ptr7 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp95 = tl.load(in_ptr9 + (3))
tmp96 = tl.broadcast_to(tmp95, [XBLOCK])
tmp102 = tl.load(in_ptr10 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp103 = tl.load(in_ptr11 + (x0), xmask)
tmp104 = tl.load(in_ptr12 + (0))
tmp105 = tl.broadcast_to(tmp104, [XBLOCK])
tmp110 = tl.load(in_ptr10 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp111 = tl.load(in_ptr12 + (1))
tmp112 = tl.broadcast_to(tmp111, [XBLOCK])
tmp118 = tl.load(in_ptr10 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp119 = tl.load(in_ptr12 + (2))
tmp120 = tl.broadcast_to(tmp119, [XBLOCK])
tmp126 = tl.load(in_ptr10 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp127 = tl.load(in_ptr12 + (3))
tmp128 = tl.broadcast_to(tmp127, [XBLOCK])
tmp5 = tmp2 + tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tl.where(tmp1, tmp5, tmp7)
tmp9 = -8999999815811072.0
tmp10 = tl.where(tmp0, tmp8, tmp9)
tmp15 = tmp2 + tmp14
tmp16 = tmp15 * tmp6
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = tl.where(tmp11, tmp17, tmp9)
tmp19 = triton_helpers.maximum(tmp10, tmp18)
tmp24 = tmp2 + tmp23
tmp25 = tmp24 * tmp6
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp20, tmp26, tmp9)
tmp28 = triton_helpers.maximum(tmp19, tmp27)
tmp33 = tmp2 + tmp32
tmp34 = tmp33 * tmp6
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp29, tmp35, tmp9)
tmp37 = triton_helpers.maximum(tmp28, tmp36)
tmp42 = tmp39 + tmp41
tmp43 = tmp42 * tmp6
tmp44 = tl.where(tmp38, tmp42, tmp43)
tmp45 = tl.where(tmp0, tmp44, tmp9)
tmp49 = tmp39 + tmp48
tmp50 = tmp49 * tmp6
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tl.where(tmp11, tmp51, tmp9)
tmp53 = triton_helpers.maximum(tmp45, tmp52)
tmp57 = tmp39 + tmp56
tmp58 = tmp57 * tmp6
tmp59 = tl.where(tmp54, tmp57, tmp58)
tmp60 = tl.where(tmp20, tmp59, tmp9)
tmp61 = triton_helpers.maximum(tmp53, tmp60)
tmp65 = tmp39 + tmp64
tmp66 = tmp65 * tmp6
tmp67 = tl.where(tmp62, tmp65, tmp66)
tmp68 = tl.where(tmp29, tmp67, tmp9)
tmp69 = triton_helpers.maximum(tmp61, tmp68)
tmp74 = tmp71 + tmp73
tmp75 = tmp74 * tmp6
tmp76 = tl.where(tmp70, tmp74, tmp75)
tmp77 = tl.where(tmp0, tmp76, tmp9)
tmp81 = tmp71 + tmp80
tmp82 = tmp81 * tmp6
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tl.where(tmp11, tmp83, tmp9)
tmp85 = triton_helpers.maximum(tmp77, tmp84)
tmp89 = tmp71 + tmp88
tmp90 = tmp89 * tmp6
tmp91 = tl.where(tmp86, tmp89, tmp90)
tmp92 = tl.where(tmp20, tmp91, tmp9)
tmp93 = triton_helpers.maximum(tmp85, tmp92)
tmp97 = tmp71 + tmp96
tmp98 = tmp97 * tmp6
tmp99 = tl.where(tmp94, tmp97, tmp98)
tmp100 = tl.where(tmp29, tmp99, tmp9)
tmp101 = triton_helpers.maximum(tmp93, tmp100)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 * tmp6
tmp108 = tl.where(tmp102, tmp106, tmp107)
tmp109 = tl.where(tmp0, tmp108, tmp9)
tmp113 = tmp103 + tmp112
tmp114 = tmp113 * tmp6
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.where(tmp11, tmp115, tmp9)
tmp117 = triton_helpers.maximum(tmp109, tmp116)
tmp121 = tmp103 + tmp120
tmp122 = tmp121 * tmp6
tmp123 = tl.where(tmp118, tmp121, tmp122)
tmp124 = tl.where(tmp20, tmp123, tmp9)
tmp125 = triton_helpers.maximum(tmp117, tmp124)
tmp129 = tmp103 + tmp128
tmp130 = tmp129 * tmp6
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.where(tmp29, tmp131, tmp9)
tmp133 = triton_helpers.maximum(tmp125, tmp132)
tl.store(out_ptr0 + (x0), tmp37, xmask)
tl.store(out_ptr1 + (x0), tmp69, xmask)
tl.store(out_ptr2 + (x0), tmp101, xmask)
tl.store(out_ptr3 + (x0), tmp133, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/si/csiextwu44e63m7askimz3girudboqtyp45f2wu2wmll5iovqchv.py
# Topologically Sorted Source Nodes: [add, e, zero_vec, attention, attention_1, add_1, e_1, attention_3, attention_4, add_2, e_2, attention_6, attention_7, add_3, e_3, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# attention => where_1
# attention_1 => exp, sub
# attention_10 => exp_3, sub_3
# attention_3 => where_4
# attention_4 => exp_1, sub_1
# attention_6 => where_7
# attention_7 => exp_2, sub_2
# attention_9 => where_10
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %permute), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {})
# %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_5, %permute_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %add_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_9, %permute_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %add_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_13, %permute_3), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
triton_poi_fused__softmax_add_leaky_relu_mul_where_3 = async_compile.triton('triton_poi_fused__softmax_add_leaky_relu_mul_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*i1', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*i1', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_leaky_relu_mul_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
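    # Rebuilds the masked attention scores and applies exp(x - rowmax) for
    # all four heads; the division by the row sum is left to
    # triton_poi_fused__softmax_4 below.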
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x2), xmask).to(tl.int1)
tmp14 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr9 + (x2), xmask).to(tl.int1)
tmp24 = tl.load(in_ptr10 + (x1), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr11 + (x0), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr12 + (x1), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr13 + (x2), xmask).to(tl.int1)
tmp34 = tl.load(in_ptr14 + (x1), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr15 + (x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr16 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tl.where(tmp1, tmp4, tmp6)
tmp8 = -8999999815811072.0
tmp9 = tl.where(tmp0, tmp7, tmp8)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tmp16 * tmp5
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tl.where(tmp0, tmp18, tmp8)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp5
tmp28 = tl.where(tmp23, tmp26, tmp27)
tmp29 = tl.where(tmp0, tmp28, tmp8)
tmp31 = tmp29 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp5
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.where(tmp0, tmp38, tmp8)
tmp41 = tmp39 - tmp40
tmp42 = tl_math.exp(tmp41)
tl.store(out_ptr0 + (x2), tmp12, xmask)
tl.store(out_ptr1 + (x2), tmp22, xmask)
tl.store(out_ptr2 + (x2), tmp32, xmask)
tl.store(out_ptr3 + (x2), tmp42, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rr/crrmj7r54x5uk325xkhuskxp4m5prz3fpx53yc2st4o5pwbhq32p.py
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
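    # Second half of the softmax: the inputs are already exp(x - rowmax), so
    # each element is divided by its row sum (rows here have fixed length 4).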
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xp/cxpnlviefwxbdj7cbio4oqhkzb74qnjn5guhdplnmdcsr7cnbsyp.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where_2, %where_5, %where_8, %where_11], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
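    # Fused ELU + concat: each 4-wide slice of the 16-wide output row comes
    # from one attention head, with ELU (x if x > 0 else expm1(x)) applied
    # inline before the head outputs are stitched together.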
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tmp40 = tl.full([1], 16, tl.int64)
tmp41 = tmp0 < tmp40
tmp42 = tl.load(in_ptr3 + ((4*x1) + ((-12) + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + (x2), tmp52, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 1), (1, 1))
assert_size_stride(primals_8, (4, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 1), (1, 1))
assert_size_stride(primals_11, (4, 1), (1, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, 1), (1, 1))
assert_size_stride(primals_14, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_2], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add, e], Original ATen: [aten.add, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_leaky_relu_0.run(buf1, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
triton_poi_fused_gt_1.run(primals_5, buf4, 16, grid=grid(16), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_6, out=buf9)
del primals_6
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_3], Original ATen: [aten.mm]
extern_kernels.mm(buf9, primals_7, out=buf10)
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_4], Original ATen: [aten.mm]
extern_kernels.mm(buf9, primals_8, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add_1, e_1], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_0.run(buf10, buf11, buf12, 16, grid=grid(16), stream=stream0)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_9, out=buf17)
del primals_9
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_5], Original ATen: [aten.mm]
extern_kernels.mm(buf17, primals_10, out=buf18)
buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_6], Original ATen: [aten.mm]
extern_kernels.mm(buf17, primals_11, out=buf19)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add_2, e_2], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_0.run(buf18, buf19, buf20, 16, grid=grid(16), stream=stream0)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_12, out=buf25)
del primals_12
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_7], Original ATen: [aten.mm]
extern_kernels.mm(buf25, primals_13, out=buf26)
buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_8], Original ATen: [aten.mm]
extern_kernels.mm(buf25, primals_14, out=buf27)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add_3, e_3], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_0.run(buf26, buf27, buf28, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [add, e, zero_vec, attention, attention_1, add_1, e_1, attention_3, attention_4, add_2, e_2, attention_6, attention_7, add_3, e_3, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_add_leaky_relu_mul_where_2.run(buf4, buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19, buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, grid=grid(4), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, e, zero_vec, attention, attention_1, add_1, e_1, attention_3, attention_4, add_2, e_2, attention_6, attention_7, add_3, e_3, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_add_leaky_relu_mul_where_3.run(buf4, buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20, buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14, buf22, buf30, 16, grid=grid(16), stream=stream0)
del buf1
del buf10
del buf11
del buf13
del buf18
del buf19
del buf2
del buf21
del buf26
del buf27
del buf29
del buf5
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 16, grid=grid(16), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.mm]
extern_kernels.mm(buf7, buf0, out=buf8)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.mm]
extern_kernels.mm(buf15, buf9, out=buf16)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_7], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf22, buf23, 16, grid=grid(16), stream=stream0)
buf24 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.mm]
extern_kernels.mm(buf23, buf17, out=buf24)
buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_10], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf30, buf31, 16, grid=grid(16), stream=stream0)
buf32 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.mm]
extern_kernels.mm(buf31, buf25, out=buf32)
buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf8, buf16, buf24, buf32, buf33, 64, grid=grid(64), stream=stream0)
return (buf33, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(primals_14, (1, 4), (1, 1), 0), reinterpret_tensor(primals_13, (1, 4), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(primals_11, (1, 4), (1, 1), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
        self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
            in_features, out_features), gain=np.sqrt(2.0)), requires_grad=True)
        self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
            out_features, 1), gain=np.sqrt(2.0)), requires_grad=True)
        self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
            out_features, 1), gain=np.sqrt(2.0)), requires_grad=True)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
f_1 = h @ self.a1
f_2 = h @ self.a2
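        # Splitting the attention vector into a1 (source) and a2 (target)
        # halves lets e_ij = LeakyReLU(a1·h_i + a2·h_j) be formed by
        # broadcasting a column against a row instead of materializing every
        # concatenated [h_i || h_j] pair.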
e = self.leakyrelu(f_1 + f_2.transpose(0, 1))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GAT(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4,
'nheads': 4}]
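# Minimal usage sketch, not part of the original module; the random dense
# 0/1 adjacency is an illustrative assumption, and the hyperparameters reuse
# the values from get_init_inputs().
if __name__ == "__main__":
    model = GAT(nfeat=4, nhid=4, dropout=0.5, alpha=4, nheads=4)
    x = torch.rand(4, 4)
    adj = (torch.rand(4, 4) > 0.5).float()
    print(model(x, adj).shape)  # torch.Size([4, 16]): nodes x (nhid * nheads)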
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tl.load(in_ptr3 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp13 = tl.load(in_ptr3 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp22 = tl.load(in_ptr3 + 2)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp31 = tl.load(in_ptr3 + 3)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp38 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp39 = tl.load(in_ptr5 + x0, xmask)
tmp40 = tl.load(in_ptr6 + 0)
tmp41 = tl.broadcast_to(tmp40, [XBLOCK])
tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp47 = tl.load(in_ptr6 + 1)
tmp48 = tl.broadcast_to(tmp47, [XBLOCK])
tmp54 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp55 = tl.load(in_ptr6 + 2)
tmp56 = tl.broadcast_to(tmp55, [XBLOCK])
tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp63 = tl.load(in_ptr6 + 3)
tmp64 = tl.broadcast_to(tmp63, [XBLOCK])
tmp70 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp71 = tl.load(in_ptr8 + x0, xmask)
tmp72 = tl.load(in_ptr9 + 0)
tmp73 = tl.broadcast_to(tmp72, [XBLOCK])
tmp78 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp79 = tl.load(in_ptr9 + 1)
tmp80 = tl.broadcast_to(tmp79, [XBLOCK])
tmp86 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp87 = tl.load(in_ptr9 + 2)
tmp88 = tl.broadcast_to(tmp87, [XBLOCK])
tmp94 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp95 = tl.load(in_ptr9 + 3)
tmp96 = tl.broadcast_to(tmp95, [XBLOCK])
tmp102 = tl.load(in_ptr10 + 4 * x0, xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp103 = tl.load(in_ptr11 + x0, xmask)
tmp104 = tl.load(in_ptr12 + 0)
tmp105 = tl.broadcast_to(tmp104, [XBLOCK])
tmp110 = tl.load(in_ptr10 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp111 = tl.load(in_ptr12 + 1)
tmp112 = tl.broadcast_to(tmp111, [XBLOCK])
tmp118 = tl.load(in_ptr10 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp119 = tl.load(in_ptr12 + 2)
tmp120 = tl.broadcast_to(tmp119, [XBLOCK])
tmp126 = tl.load(in_ptr10 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp127 = tl.load(in_ptr12 + 3)
tmp128 = tl.broadcast_to(tmp127, [XBLOCK])
tmp5 = tmp2 + tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tl.where(tmp1, tmp5, tmp7)
tmp9 = -8999999815811072.0
tmp10 = tl.where(tmp0, tmp8, tmp9)
tmp15 = tmp2 + tmp14
tmp16 = tmp15 * tmp6
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = tl.where(tmp11, tmp17, tmp9)
tmp19 = triton_helpers.maximum(tmp10, tmp18)
tmp24 = tmp2 + tmp23
tmp25 = tmp24 * tmp6
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp20, tmp26, tmp9)
tmp28 = triton_helpers.maximum(tmp19, tmp27)
tmp33 = tmp2 + tmp32
tmp34 = tmp33 * tmp6
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp29, tmp35, tmp9)
tmp37 = triton_helpers.maximum(tmp28, tmp36)
tmp42 = tmp39 + tmp41
tmp43 = tmp42 * tmp6
tmp44 = tl.where(tmp38, tmp42, tmp43)
tmp45 = tl.where(tmp0, tmp44, tmp9)
tmp49 = tmp39 + tmp48
tmp50 = tmp49 * tmp6
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tl.where(tmp11, tmp51, tmp9)
tmp53 = triton_helpers.maximum(tmp45, tmp52)
tmp57 = tmp39 + tmp56
tmp58 = tmp57 * tmp6
tmp59 = tl.where(tmp54, tmp57, tmp58)
tmp60 = tl.where(tmp20, tmp59, tmp9)
tmp61 = triton_helpers.maximum(tmp53, tmp60)
tmp65 = tmp39 + tmp64
tmp66 = tmp65 * tmp6
tmp67 = tl.where(tmp62, tmp65, tmp66)
tmp68 = tl.where(tmp29, tmp67, tmp9)
tmp69 = triton_helpers.maximum(tmp61, tmp68)
tmp74 = tmp71 + tmp73
tmp75 = tmp74 * tmp6
tmp76 = tl.where(tmp70, tmp74, tmp75)
tmp77 = tl.where(tmp0, tmp76, tmp9)
tmp81 = tmp71 + tmp80
tmp82 = tmp81 * tmp6
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tl.where(tmp11, tmp83, tmp9)
tmp85 = triton_helpers.maximum(tmp77, tmp84)
tmp89 = tmp71 + tmp88
tmp90 = tmp89 * tmp6
tmp91 = tl.where(tmp86, tmp89, tmp90)
tmp92 = tl.where(tmp20, tmp91, tmp9)
tmp93 = triton_helpers.maximum(tmp85, tmp92)
tmp97 = tmp71 + tmp96
tmp98 = tmp97 * tmp6
tmp99 = tl.where(tmp94, tmp97, tmp98)
tmp100 = tl.where(tmp29, tmp99, tmp9)
tmp101 = triton_helpers.maximum(tmp93, tmp100)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 * tmp6
tmp108 = tl.where(tmp102, tmp106, tmp107)
tmp109 = tl.where(tmp0, tmp108, tmp9)
tmp113 = tmp103 + tmp112
tmp114 = tmp113 * tmp6
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.where(tmp11, tmp115, tmp9)
tmp117 = triton_helpers.maximum(tmp109, tmp116)
tmp121 = tmp103 + tmp120
tmp122 = tmp121 * tmp6
tmp123 = tl.where(tmp118, tmp121, tmp122)
tmp124 = tl.where(tmp20, tmp123, tmp9)
tmp125 = triton_helpers.maximum(tmp117, tmp124)
tmp129 = tmp103 + tmp128
tmp130 = tmp129 * tmp6
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.where(tmp29, tmp131, tmp9)
tmp133 = triton_helpers.maximum(tmp125, tmp132)
tl.store(out_ptr0 + x0, tmp37, xmask)
tl.store(out_ptr1 + x0, tmp69, xmask)
tl.store(out_ptr2 + x0, tmp101, xmask)
tl.store(out_ptr3 + x0, tmp133, xmask)
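# Softmax numerator pass: recomputes the same masked logits per head and stores
# exp(logit - row_max) for every edge, again one output buffer per head.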
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, xmask).to(tl.int1)
tmp14 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr9 + x2, xmask).to(tl.int1)
tmp24 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr13 + x2, xmask).to(tl.int1)
tmp34 = tl.load(in_ptr14 + x1, xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr15 + x0, xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr16 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tl.where(tmp1, tmp4, tmp6)
tmp8 = -8999999815811072.0
tmp9 = tl.where(tmp0, tmp7, tmp8)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tmp16 * tmp5
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tl.where(tmp0, tmp18, tmp8)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp5
tmp28 = tl.where(tmp23, tmp26, tmp27)
tmp29 = tl.where(tmp0, tmp28, tmp8)
tmp31 = tmp29 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp5
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.where(tmp0, tmp38, tmp8)
tmp41 = tmp39 - tmp40
tmp42 = tl_math.exp(tmp41)
tl.store(out_ptr0 + x2, tmp12, xmask)
tl.store(out_ptr1 + x2, tmp22, xmask)
tl.store(out_ptr2 + x2, tmp32, xmask)
tl.store(out_ptr3 + x2, tmp42, xmask)
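# Softmax normalization: divides each exp() entry by its row sum, finishing
# F.softmax(attention, dim=1) for a single head.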
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
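# Head concatenation: applies ELU (expm1 on the non-positive branch) to each
# head's h_prime and writes torch.cat([...], dim=1) into the (4, 16) output.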
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tl.full([1], 16, tl.int64)
tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + x2, tmp52, xmask)
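# Host-side driver: runs the W projection and the a1/a2 scorings for each head
# via extern_kernels.mm, then the fused Triton kernels above, and returns the
# concatenated multi-head output first (the remaining tensors are intermediates
# kept for the compiled backward).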
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 1), (1, 1))
assert_size_stride(primals_8, (4, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 1), (1, 1))
assert_size_stride(primals_11, (4, 1), (1, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, 1), (1, 1))
assert_size_stride(primals_14, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_gt_1[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_6, out=buf9)
del primals_6
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf9, primals_7, out=buf10)
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf9, primals_8, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf10, buf11, buf12, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_9, out=buf17)
del primals_9
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf17, primals_10, out=buf18)
buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf17, primals_11, out=buf19)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf18, buf19, buf20, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_12, out=buf25)
del primals_12
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf25, primals_13, out=buf26)
buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf25, primals_14, out=buf27)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf26, buf27, buf28, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(4)](buf4,
buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19,
buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(16)](buf4,
buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20,
buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14,
buf22, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf10
del buf11
del buf13
del buf18
del buf19
del buf2
del buf21
del buf26
del buf27
del buf29
del buf5
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
extern_kernels.mm(buf7, buf0, out=buf8)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf14, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = buf14
del buf14
extern_kernels.mm(buf15, buf9, out=buf16)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf22, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf24 = buf22
del buf22
extern_kernels.mm(buf23, buf17, out=buf24)
buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf30, buf31, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf32 = buf30
del buf30
extern_kernels.mm(buf31, buf25, out=buf32)
buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_cat_5[grid(64)](buf8, buf16, buf24, buf32, buf33,
64, XBLOCK=64, num_warps=1, num_stages=1)
return (buf33, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20,
buf23, buf24, buf28, buf31, buf32, reinterpret_tensor(buf25, (4, 4),
(1, 4), 0), reinterpret_tensor(primals_14, (1, 4), (1, 1), 0),
reinterpret_tensor(primals_13, (1, 4), (1, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_11, (1, 4), (1, 1), 0), reinterpret_tensor(primals_10, (1,
4), (1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_8, (1, 4), (1, 1), 0),
reinterpret_tensor(primals_7, (1, 4), (1, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_4, (1, 4), (1, 1), 0), reinterpret_tensor(primals_3, (1, 4),
(1, 1), 0))
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
in_features, out_features).type(torch.FloatTensor if torch.cuda
.is_available() else torch.FloatTensor), gain=np.sqrt(2.0)),
requires_grad=True)
self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
out_features, 1).type(torch.FloatTensor if torch.cuda.
is_available() else torch.FloatTensor), gain=np.sqrt(2.0)),
requires_grad=True)
self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
out_features, 1).type(torch.FloatTensor if torch.cuda.
is_available() else torch.FloatTensor), gain=np.sqrt(2.0)),
requires_grad=True)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
h.size()[0]
f_1 = h @ self.a1
f_2 = h @ self.a2
e = self.leakyrelu(f_1 + f_2.transpose(0, 1))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GATNew(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
super(GATNew, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, input_0, input_1):
primals_1 = self.attention_0.W
primals_3 = self.attention_0.a1
primals_4 = self.attention_0.a2
primals_2 = self.attention_1.W
primals_7 = self.attention_1.a1
primals_8 = self.attention_1.a2
primals_5 = self.attention_2.W
primals_10 = self.attention_2.a1
primals_11 = self.attention_2.a2
primals_6 = self.attention_3.W
primals_13 = self.attention_3.a1
primals_14 = self.attention_3.a2
primals_9 = input_0
primals_12 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
| leiloong/PaperRobot | GAT | false | 7,104 | [
"MIT"
] | 1 | 070972dc1548571c28d89d2c54fb379e87d172c7 | https://github.com/leiloong/PaperRobot/tree/070972dc1548571c28d89d2c54fb379e87d172c7 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super().__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
in_features, out_features).type(torch.FloatTensor if torch.cuda
.is_available() else torch.FloatTensor), gain=np.sqrt(2.0)),
requires_grad=True)
self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
out_features, 1).type(torch.FloatTensor if torch.cuda.
is_available() else torch.FloatTensor), gain=np.sqrt(2.0)),
requires_grad=True)
self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(
out_features, 1).type(torch.FloatTensor if torch.cuda.
is_available() else torch.FloatTensor), gain=np.sqrt(2.0)),
requires_grad=True)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
h.size()[0]
f_1 = h @ self.a1
f_2 = h @ self.a2
e = self.leakyrelu(f_1 + f_2.transpose(0, 1))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class Model(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
super().__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4,
'nheads': 4}]
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vh/cvhcrsxucgh7eot2p772apvh6wg7qihujnij7ewp3yqeqgpnmix6.py
# Topologically Sorted Source Nodes: [input_1, min_1, ne, mask, mul, sum_1, max_1, mul_1, sum_2, clamp, truediv, sub], Original ATen: [aten.sigmoid, aten.minimum, aten.ne, aten._to_copy, aten.mul, aten.sum, aten.maximum, aten.clamp, aten.div, aten.rsub]
# Source node to ATen node mapping:
# clamp => clamp_min
# input_1 => sigmoid
# mask => convert_element_type
# max_1 => maximum
# min_1 => minimum
# mul => mul
# mul_1 => mul_1
# ne => ne
# sub => sub
# sum_1 => sum_1
# sum_2 => sum_2
# truediv => div
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view,), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%sigmoid, %view_1), kwargs = {})
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%view_1, -1), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum, %convert_element_type), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%sigmoid, %view_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%maximum, %convert_element_type), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_2, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %clamp_min), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = triton_helpers.minimum(tmp1, tmp2)
tmp4 = -1.0
tmp5 = tmp2 != tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = triton_helpers.maximum(tmp1, tmp2)
tmp12 = tmp11 * tmp6
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp10 / tmp17
tmp19 = tmp16 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_1, min_1, ne, mask, mul, sum_1, max_1, mul_1, sum_2, clamp, truediv, sub], Original ATen: [aten.sigmoid, aten.minimum, aten.ne, aten._to_copy, aten.mul, aten.sum, aten.maximum, aten.clamp, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, ignore_target=-1):
super().__init__()
self.ignore_target = ignore_target
def forward(self, input, target):
"""
:param input: (N), logit
:param target: (N), {0, 1}
        :return: scalar soft-Dice loss in [0, 1]
"""
input = torch.sigmoid(input.view(-1))
target = target.float().view(-1)
mask = (target != self.ignore_target).float()
return 1.0 - (torch.min(input, target) * mask).sum() / torch.clamp((
torch.max(input, target) * mask).sum(), min=1.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
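# Hypothetical usage sketch (shapes illustrative): entries equal to
# ignore_target (-1) are masked out of both the min and max reductions.
# criterion = DiceLoss()
# loss = criterion(torch.randn(4, 4, 4, 4), (torch.rand(4, 4, 4, 4) > 0.5).float())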
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
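# Single persistent-reduction kernel fusing the whole Dice loss: sigmoid on the
# logits, the ignore mask (target != -1), both masked sums, the clamped
# division, and the final 1 - ratio, written to a scalar output buffer.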
@triton.jit
def triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = triton_helpers.minimum(tmp1, tmp2)
tmp4 = -1.0
tmp5 = tmp2 != tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = triton_helpers.maximum(tmp1, tmp2)
tmp12 = tmp11 * tmp6
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp10 / tmp17
tmp19 = tmp16 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0[
grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class DiceLossNew(nn.Module):
def __init__(self, ignore_target=-1):
super().__init__()
self.ignore_target = ignore_target
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| liuhuaijjin/rpn_rois_proposals_layers | DiceLoss | false | 7,105 | [
"MIT"
] | 1 | c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | https://github.com/liuhuaijjin/rpn_rois_proposals_layers/tree/c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, ignore_target=-1):
super().__init__()
self.ignore_target = ignore_target
def forward(self, input, target):
"""
:param input: (N), logit
:param target: (N), {0, 1}
        :return: scalar soft-Dice loss in [0, 1]
"""
input = torch.sigmoid(input.view(-1))
target = target.float().view(-1)
mask = (target != self.ignore_target).float()
return 1.0 - (torch.min(input, target) * mask).sum() / torch.clamp((
torch.max(input, target) * mask).sum(), min=1.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SmoothL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dp/cdpduibkoatpbjt2mqbfewmgyksux7x2pg5cnyn3ykjujchyfz76.py
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, mul_1, truediv, sub_1, loss], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.div, aten.where]
# Source node to ATen node mapping:
# diff => abs_1
# loss => where
# lt => lt
# mul => mul
# mul_1 => mul_1
# sub => sub
# sub_1 => sub_1
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=4] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %abs_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {})
triton_poi_fused_abs_div_lt_mul_sub_where_0 = async_compile.triton('triton_poi_fused_abs_div_lt_mul_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_div_lt_mul_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_div_lt_mul_sub_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 0.5
tmp7 = tmp3 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp6
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, mul_1, truediv, sub_1, loss], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.div, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_div_lt_mul_sub_where_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SmoothL1Loss(nn.Module):
def __init__(self, beta=1.0, reduction='mean'):
super().__init__()
self.beta = beta
self.reduction = reduction
def forward(self, pred, target, weight=None):
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < self.beta, 0.5 * diff * diff / self.beta,
diff - 0.5 * self.beta)
if weight is not None:
loss = loss * weight
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
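# Elementwise fused smooth-L1 with beta == 1.0 baked in as constants:
# 0.5 * d * d / beta where |d| < beta, else |d| - 0.5 * beta.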
@triton.jit
def triton_poi_fused_abs_div_lt_mul_sub_where_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 0.5
tmp7 = tmp3 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp6
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_div_lt_mul_sub_where_0[grid(256)](arg0_1,
arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SmoothL1LossNew(nn.Module):
def __init__(self, beta=1.0, reduction='mean'):
super().__init__()
self.beta = beta
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| liuhuaijjin/rpn_rois_proposals_layers | SmoothL1Loss | false | 7,106 | [
"MIT"
] | 1 | c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | https://github.com/liuhuaijjin/rpn_rois_proposals_layers/tree/c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, beta=1.0, reduction='mean'):
super().__init__()
self.beta = beta
self.reduction = reduction
def forward(self, pred, target, weight=None):
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < self.beta, 0.5 * diff * diff / self.beta,
diff - 0.5 * self.beta)
if weight is not None:
loss = loss * weight
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LR_PAD | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/im/cimndydfpnseebt2myhntixtonx4t2cbfcxglro72lbyd5q2jqag.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_1, %arg0_1, %slice_2], 3), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 + (4*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + ((4*x1) + ((-1) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 6, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + (4*x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 384, grid=grid(384), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def lr_pad(x, padding=1):
return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3)
class LR_PAD(nn.Module):
def __init__(self, padding=1):
super(LR_PAD, self).__init__()
self.padding = padding
def forward(self, x):
return lr_pad(x, self.padding)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
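# Hypothetical usage sketch: padding=1 grows the width dim by 2 via wrap-around.
# pad = LR_PAD(padding=1)
# y = pad(torch.rand(4, 4, 4, 4))  # shape (4, 4, 4, 6)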
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
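# Circular left-right padding fused into one gather: output column 0 reads the
# last input column, columns 1..4 copy the input, and column 5 wraps back to
# column 0, matching torch.cat([x[..., -1:], x, x[..., :1]], dim=3).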
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (4 * x1 + (-1 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def lr_pad(x, padding=1):
return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3)
class LR_PADNew(nn.Module):
def __init__(self, padding=1):
super(LR_PADNew, self).__init__()
self.padding = padding
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| lixuran/Room_Layout_Estimation_new | LR_PAD | false | 7,107 | [
"MIT"
] | 1 | 8e73b66e1418675e5bb82f3780091c406fe721d8 | https://github.com/lixuran/Room_Layout_Estimation_new/tree/8e73b66e1418675e5bb82f3780091c406fe721d8 | import torch
from torch import nn
def lr_pad(x, padding=1):
return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3)
class Model(nn.Module):
def __init__(self, padding=1):
super().__init__()
self.padding = padding
def forward(self, x):
return lr_pad(x, self.padding)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ch/cchblwxibhbejgpezaajsszmynwayrtalecnd5ksg22infzh2v3o.py
# Topologically Sorted Source Nodes: [dot, dot_1], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# dot => add
# dot_1 => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %expand), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pw/cpw7p22sygsv72j67klg2k3xeokdqrvalgmkjpkzpqpj5pv4xmkc.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weight => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot, dot_1], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(primals_3, buf0, primals_5, buf1, 256, grid=grid(256), stream=stream0)
del primals_3
del primals_5
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf3, buf4, buf5, buf6, 4, 16, grid=grid(4), stream=stream0)
buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(primals_2, (4, 16, 4), (64, 4, 1), 0), out=buf7)
del buf6
return (reinterpret_tensor(buf7, (4, 4), (4, 1), 0), primals_1, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_2, (4, 4, 16), (64, 1, 4), 0), primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.min_value = -100000000.0
def forward(self, h, att_feats, p_att_feats):
batch_size = h.size(0)
att_size = att_feats.numel() // batch_size // self.rnn_size
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h)
att_h = att_h.unsqueeze(1).expand_as(att)
dot = att + att_h
        dot = torch.tanh(dot)
dot = dot.view(-1, self.att_hid_size)
dot = self.alpha_net(dot)
dot = dot.view(-1, att_size)
weight = F.softmax(dot, dim=1)
att_feats_ = att_feats.view(-1, att_size, self.rnn_size)
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1)
return att_res
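# The forward above is standard additive (Bahdanau-style) attention:
# score_i = alpha_net(tanh(p_att_feats_i + W_h h)), weight = softmax(score),
# att_res = sum_i weight_i * att_feats_i. With the shapes from get_inputs()
# below, batch_size = 4, att_size = 16, and rnn_size = att_hid_size = 4.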
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4,
4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
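# Launched with XBLOCK=1 over grid(4), the kernel above is a persistent
# reduction: each program owns one row and reduces its full 16-element extent
# (RBLOCK = 16) in a single pass, emitting the row max, the exp-sum normalizer
# and the normalized softmax without any extra kernel launches.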
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_4, (4, 4),
(1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(256)](primals_3, buf0, primals_5,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
del primals_5
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_per_fused__softmax_1[grid(4)](buf3, buf4, buf5, buf6, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1),
0), reinterpret_tensor(primals_2, (4, 16, 4), (64, 4, 1), 0),
out=buf7)
del buf6
return reinterpret_tensor(buf7, (4, 4), (4, 1), 0
), primals_1, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_2,
(4, 4, 16), (64, 1, 4), 0), primals_6
class AttentionNew(nn.Module):
def __init__(self, opt):
super(AttentionNew, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.min_value = -100000000.0
def forward(self, input_0, input_1, input_2):
primals_1 = self.h2att.weight
primals_5 = self.h2att.bias
primals_6 = self.alpha_net.weight
primals_7 = self.alpha_net.bias
primals_4 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
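# Illustrative usage of the compiled module (assumes a CUDA device and the
# paritybench `_mock_config` helper; all names below are for this sketch only):
#
#   opt = _mock_config(rnn_size=4, att_hid_size=4)
#   m = AttentionNew(opt).cuda()
#   h = torch.rand(4, 4, device='cuda')
#   att_feats = torch.rand(4, 4, 4, 4, device='cuda')
#   p_att_feats = torch.rand(4, 4, 4, 4, device='cuda')
#   att_res = m(h, att_feats, p_att_feats)   # (4, 4) attended features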
| liuqihan/NeuralBabyTalk | Attention | false | 7,108 | [
"MIT"
] | 1 | 4a2ef428ec9f251a1eb898cc0c828a6ef1c55e69 | https://github.com/liuqihan/NeuralBabyTalk/tree/4a2ef428ec9f251a1eb898cc0c828a6ef1c55e69 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
class Model(nn.Module):
def __init__(self, opt):
super().__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.min_value = -100000000.0
def forward(self, h, att_feats, p_att_feats):
batch_size = h.size(0)
att_size = att_feats.numel() // batch_size // self.rnn_size
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h)
att_h = att_h.unsqueeze(1).expand_as(att)
dot = att + att_h
        dot = torch.tanh(dot)
dot = dot.view(-1, self.att_hid_size)
dot = self.alpha_net(dot)
dot = dot.view(-1, att_size)
weight = F.softmax(dot, dim=1)
att_feats_ = att_feats.view(-1, att_size, self.rnn_size)
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1)
return att_res
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4,
4, 4])]
def get_init_inputs():
    return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
|
RewardCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/li/cli44uiguy6lxjb4xze6gmfvocbzquptllewuf2oajp3vjhlvcar.py
# Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# mul => mul
# neg => neg
# output => mul_1
# output_1 => div
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %view_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_div_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = r0 % 4
tmp12 = tl.full([1, 1], 0, tl.int64)
tmp13 = tmp11 >= tmp12
tmp14 = tl.full([1, 1], 1, tl.int64)
tmp15 = tmp11 < tmp14
tmp16 = tl.full([1, 1], True, tl.int1)
tmp17 = tl.full(tmp16.shape, False, tmp16.dtype)
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tmp11 >= tmp14
tmp20 = tl.full([1, 1], 4, tl.int64)
tmp21 = tmp11 < tmp20
tmp22 = tl.load(in_ptr0 + (tl.broadcast_to((4*(r0 // 4)) + ((-1) + (r0 % 4)), [XBLOCK, RBLOCK])), tmp19, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 > tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp19, tmp23, tmp24)
tmp26 = tl.where(tmp15, tmp18, tmp25)
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp10 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = tmp26.to(tl.int64)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp31 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None)
''', device_str='cuda')
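# Reference sketch (not used by the compiled path): an eager-mode equivalent of
# the fused kernel above, under the (4, 4, 4) / (4, 4) shapes asserted in
# call(). The function name is illustrative, not part of the generated module.
def _reference_reward_loss(logprobs, seq, reward):
    # Pick the log-prob of each sampled token, then flatten.
    picked = logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).reshape(-1)
    m = seq > 0
    # Step t counts while token t - 1 was non-pad; step 0 always counts.
    mask = torch.cat([m.new_ones(m.size(0), 1), m[:, :-1]], 1).reshape(-1).float()
    out = -picked.float() * reward.reshape(-1) * mask
    return out.sum() / mask.sum()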
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0.run(buf2, arg1_1, arg0_1, arg2_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import *
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
        # Pick the log-prob of each sampled token: (N, T, V) -> (N, T) -> (N*T,).
        input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
        input = input.reshape(-1)
        reward = reward.reshape(-1)
        # Step t counts while token t - 1 was non-pad; step 0 always counts.
        mask = seq > 0
        mask = torch.cat([mask.new_ones(mask.size(0), 1), mask[:, :-1]], 1
            ).reshape(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = r0 % 4
tmp12 = tl.full([1, 1], 0, tl.int64)
tmp14 = tl.full([1, 1], 1, tl.int64)
tmp15 = tmp11 < tmp14
tmp16 = tl.full([1, 1], True, tl.int1)
tmp17 = tl.full(tmp16.shape, False, tmp16.dtype)
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tmp11 >= tmp14
tl.full([1, 1], 4, tl.int64)
tmp22 = tl.load(in_ptr0 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4),
[XBLOCK, RBLOCK]), tmp19, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 > tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp19, tmp23, tmp24)
tmp26 = tl.where(tmp15, tmp18, tmp25)
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp10 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = tmp26.to(tl.int64)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp31 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
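# Note on tmp1-tmp4 above: the gather index is wrapped Python-style (a negative
# index i becomes i + 4) and then bounds-checked with tl.device_assert -- the
# guard Inductor emits for indirect loads when indirect-indexing asserts are on.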
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class RewardCriterionNew(nn.Module):
def __init__(self):
super(RewardCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
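# Illustrative usage, mirroring get_inputs() above (CUDA assumed; note this
# compiled artifact was specialized for int64 log-prob inputs, per get_inputs()):
#
#   crit = RewardCriterionNew()
#   logp = torch.ones(4, 4, 4, dtype=torch.int64, device='cuda')
#   seq = torch.ones(4, 4, dtype=torch.int64, device='cuda')
#   reward = torch.rand(4, 4, device='cuda')
#   loss = crit(logp, seq, reward)   # 0-dim tensor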
| linzhlalala/self-critical.pytorch | RewardCriterion | false | 7,109 | [
"MIT"
] | 1 | b856250ac52ba63656b1b03cdc3d7e830ed43f68 | https://github.com/linzhlalala/self-critical.pytorch/tree/b856250ac52ba63656b1b03cdc3d7e830ed43f68 | import torch
import torch.nn as nn
from torch.autograd import *
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, seq, reward):
        # Pick the log-prob of each sampled token: (N, T, V) -> (N, T) -> (N*T,).
        input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
        input = input.reshape(-1)
        reward = reward.reshape(-1)
        # Step t counts while token t - 1 was non-pad; step 0 always counts.
        mask = seq > 0
        mask = torch.cat([mask.new_ones(mask.size(0), 1), mask[:, :-1]], 1
            ).reshape(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return []
|
MaskLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/o5/co5jkqyvwvljd5gniaxw3e5tv76zqkpue4b4bnkkbf7haycaxfte.py
# Topologically Sorted Source Nodes: [a, mul_2, sub_1, mul_3, exp_1, add_1, b, dot, add_2, sum_1, div, a_1, mul_5, sub_2, mul_6, exp_2, add_3, b_1, dot_1, add_4, sum_2, div_1, a_2, mul_8, sub_3, mul_9, exp_3, add_5, b_2, dot_2, add_6, sum_3, div_2, a_3, mul_11, sub_4, mul_12, exp_4, add_7, b_3, dot_3, add_8, sum_4, div_3], Original ATen: [aten.minimum, aten.mul, aten.sub, aten.exp, aten.add, aten.reciprocal, aten.dot, aten.sum, aten.div]
# Source node to ATen node mapping:
# a => minimum_1
# a_1 => minimum_2
# a_2 => minimum_3
# a_3 => minimum_4
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# add_7 => add_7
# add_8 => add_8
# b => mul_5, reciprocal_1
# b_1 => mul_10, reciprocal_2
# b_2 => mul_15, reciprocal_3
# b_3 => mul_20, reciprocal_4
# div => div
# div_1 => div_1
# div_2 => div_2
# div_3 => div_3
# dot => mul_6, sum_1
# dot_1 => mul_11, sum_3
# dot_2 => mul_16, sum_5
# dot_3 => mul_21, sum_7
# exp_1 => exp_1
# exp_2 => exp_2
# exp_3 => exp_3
# exp_4 => exp_4
# mul_11 => mul_18
# mul_12 => mul_19
# mul_2 => mul_3
# mul_3 => mul_4
# mul_5 => mul_8
# mul_6 => mul_9
# mul_8 => mul_13
# mul_9 => mul_14
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sum_1 => sum_2
# sum_2 => sum_4
# sum_3 => sum_6
# sum_4 => sum_8
# Graph fragment:
# %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%select_2, %select_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 0.55), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, -100), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_4,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1), kwargs = {})
# %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_1,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum_1, %mul_5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_2, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_2, [-1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %minimum_2 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%select_5, %select_4), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, 0.55), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_4, %mul_8), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, -100), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_9,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_2, 1), kwargs = {})
# %reciprocal_2 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_3,), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_2, 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum_2, %mul_10), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_11,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_5, %arg1_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_4, [-1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %sum_4), kwargs = {})
# %minimum_3 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%select_8, %select_7), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, 0.55), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_7, %mul_13), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, -100), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_14,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_3, 1), kwargs = {})
# %reciprocal_3 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_5,), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_3, 1), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum_3, %mul_15), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_16,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_8, %arg1_1), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_6, [-1]), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_5, %sum_6), kwargs = {})
# %minimum_4 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%select_11, %select_10), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, 0.55), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_10, %mul_18), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, -100), kwargs = {})
# %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_19,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_4, 1), kwargs = {})
# %reciprocal_4 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_7,), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_4, 1), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum_4, %mul_20), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_21,), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_11, %arg1_1), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_8, [-1]), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_7, %sum_8), kwargs = {})
triton_per_fused_add_div_dot_exp_minimum_mul_reciprocal_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_dot_exp_minimum_mul_reciprocal_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_exp_minimum_mul_reciprocal_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 44, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_dot_exp_minimum_mul_reciprocal_sub_sum_0(in_ptr0, in_ptr1, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (12 + r0), None)
tmp1 = tl.load(in_ptr1 + (12 + r0), None)
tmp3 = tl.load(in_ptr1 + (3))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5 = tl.load(in_ptr1 + (7))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.load(in_ptr1 + (11))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.load(in_ptr1 + (15))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp29 = tl.load(in_ptr0 + (8 + r0), None)
tmp30 = tl.load(in_ptr1 + (8 + r0), None)
tmp32 = tl.load(in_ptr1 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp34 = tl.load(in_ptr1 + (6))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = tl.load(in_ptr1 + (10))
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp40 = tl.load(in_ptr1 + (14))
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp54 = tl.load(in_ptr0 + (4 + r0), None)
tmp55 = tl.load(in_ptr1 + (4 + r0), None)
tmp57 = tl.load(in_ptr1 + (1))
tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK])
tmp59 = tl.load(in_ptr1 + (5))
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp62 = tl.load(in_ptr1 + (9))
tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK])
tmp65 = tl.load(in_ptr1 + (13))
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp79 = tl.load(in_ptr0 + (r0), None)
tmp80 = tl.load(in_ptr1 + (r0), None)
tmp82 = tl.load(in_ptr1 + (0))
tmp83 = tl.broadcast_to(tmp82, [XBLOCK, RBLOCK])
tmp84 = tl.load(in_ptr1 + (4))
tmp85 = tl.broadcast_to(tmp84, [XBLOCK, RBLOCK])
tmp87 = tl.load(in_ptr1 + (8))
tmp88 = tl.broadcast_to(tmp87, [XBLOCK, RBLOCK])
tmp90 = tl.load(in_ptr1 + (12))
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp104 = tl.load(in_ptr0 + (0))
tmp105 = tl.broadcast_to(tmp104, [XBLOCK, RBLOCK])
tmp106 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr0 + (1))
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp110 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr0 + (2))
tmp114 = tl.broadcast_to(tmp113, [XBLOCK, RBLOCK])
tmp115 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp118 = tl.load(in_ptr0 + (3))
tmp119 = tl.broadcast_to(tmp118, [XBLOCK, RBLOCK])
tmp120 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp124 = tl.load(in_ptr0 + (4))
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = tl.load(in_ptr0 + (5))
tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK])
tmp131 = tl.load(in_ptr0 + (6))
tmp132 = tl.broadcast_to(tmp131, [XBLOCK, RBLOCK])
tmp135 = tl.load(in_ptr0 + (7))
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp140 = tl.load(in_ptr0 + (8))
tmp141 = tl.broadcast_to(tmp140, [XBLOCK, RBLOCK])
tmp143 = tl.load(in_ptr0 + (9))
tmp144 = tl.broadcast_to(tmp143, [XBLOCK, RBLOCK])
tmp147 = tl.load(in_ptr0 + (10))
tmp148 = tl.broadcast_to(tmp147, [XBLOCK, RBLOCK])
tmp151 = tl.load(in_ptr0 + (11))
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp156 = tl.load(in_ptr0 + (12))
tmp157 = tl.broadcast_to(tmp156, [XBLOCK, RBLOCK])
tmp159 = tl.load(in_ptr0 + (13))
tmp160 = tl.broadcast_to(tmp159, [XBLOCK, RBLOCK])
tmp163 = tl.load(in_ptr0 + (14))
tmp164 = tl.broadcast_to(tmp163, [XBLOCK, RBLOCK])
tmp167 = tl.load(in_ptr0 + (15))
tmp168 = tl.broadcast_to(tmp167, [XBLOCK, RBLOCK])
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.55
tmp15 = tmp13 * tmp14
tmp16 = tmp1 - tmp15
tmp17 = -100.0
tmp18 = tmp16 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = 1.0
tmp21 = tmp19 + tmp20
tmp22 = tl.full([1, 1], 1, tl.int32)
tmp23 = tmp22 / tmp21
tmp24 = tmp23 * tmp20
tmp25 = tmp2 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = tl.sum(tmp26, 1)[:, None]
tmp31 = triton_helpers.minimum(tmp29, tmp30)
tmp36 = triton_helpers.maximum(tmp33, tmp35)
tmp39 = triton_helpers.maximum(tmp36, tmp38)
tmp42 = triton_helpers.maximum(tmp39, tmp41)
tmp43 = tmp42 * tmp14
tmp44 = tmp30 - tmp43
tmp45 = tmp44 * tmp17
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp46 + tmp20
tmp48 = tmp22 / tmp47
tmp49 = tmp48 * tmp20
tmp50 = tmp31 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = tl.sum(tmp51, 1)[:, None]
tmp56 = triton_helpers.minimum(tmp54, tmp55)
tmp61 = triton_helpers.maximum(tmp58, tmp60)
tmp64 = triton_helpers.maximum(tmp61, tmp63)
tmp67 = triton_helpers.maximum(tmp64, tmp66)
tmp68 = tmp67 * tmp14
tmp69 = tmp55 - tmp68
tmp70 = tmp69 * tmp17
tmp71 = tl_math.exp(tmp70)
tmp72 = tmp71 + tmp20
tmp73 = tmp22 / tmp72
tmp74 = tmp73 * tmp20
tmp75 = tmp56 * tmp74
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp81 = triton_helpers.minimum(tmp79, tmp80)
tmp86 = triton_helpers.maximum(tmp83, tmp85)
tmp89 = triton_helpers.maximum(tmp86, tmp88)
tmp92 = triton_helpers.maximum(tmp89, tmp91)
tmp93 = tmp92 * tmp14
tmp94 = tmp80 - tmp93
tmp95 = tmp94 * tmp17
tmp96 = tl_math.exp(tmp95)
tmp97 = tmp96 + tmp20
tmp98 = tmp22 / tmp97
tmp99 = tmp98 * tmp20
tmp100 = tmp81 * tmp99
tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK])
tmp103 = tl.sum(tmp101, 1)[:, None]
tmp107 = tmp105 + tmp106
tmp111 = tmp109 + tmp110
tmp112 = tmp107 + tmp111
tmp116 = tmp114 + tmp115
tmp117 = tmp112 + tmp116
tmp121 = tmp119 + tmp120
tmp122 = tmp117 + tmp121
tmp123 = tmp103 / tmp122
tmp126 = tmp125 + tmp106
tmp129 = tmp128 + tmp110
tmp130 = tmp126 + tmp129
tmp133 = tmp132 + tmp115
tmp134 = tmp130 + tmp133
tmp137 = tmp136 + tmp120
tmp138 = tmp134 + tmp137
tmp139 = tmp78 / tmp138
tmp142 = tmp141 + tmp106
tmp145 = tmp144 + tmp110
tmp146 = tmp142 + tmp145
tmp149 = tmp148 + tmp115
tmp150 = tmp146 + tmp149
tmp153 = tmp152 + tmp120
tmp154 = tmp150 + tmp153
tmp155 = tmp53 / tmp154
tmp158 = tmp157 + tmp106
tmp161 = tmp160 + tmp110
tmp162 = tmp158 + tmp161
tmp165 = tmp164 + tmp115
tmp166 = tmp162 + tmp165
tmp169 = tmp168 + tmp120
tmp170 = tmp166 + tmp169
tmp171 = tmp28 / tmp170
tl.store(out_ptr4 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp123, None)
tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp139, None)
tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp155, None)
tl.store(out_ptr7 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp171, None)
''', device_str='cuda')
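# Reference sketch (not used by the compiled path): the per-row quantity the
# fused kernel above computes for each row n of the asserted 4x4 inputs.
# Inductor fully unrolled the Python loop over n = 0..3, which is why the
# kernel loads every element of both matrices explicitly; the factor of 2 is
# deferred to the stack kernel below. Names here are illustrative.
def _reference_row_term(inp, tar, n):
    values = tar.max(dim=0).values                 # column-wise max of target
    a = torch.min(inp[n], tar[n])
    b = 1.0 / (1.0 + torch.exp(-100.0 * (tar[n] - 0.55 * values[n])))
    # `tar` (the full matrix) broadcasts against the row inp[n], so the result
    # is a length-4 vector -- one of out_ptr4..out_ptr7 in the kernel.
    return torch.dot(a, b) / (inp[n] + tar).sum(dim=-1)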
# kernel path: runs/run_shard_4/inductor_cache/pd/cpdke6scsojcjiseffjq2rccky6wpkjfkayjnletpzhsryazuo4c.py
# Topologically Sorted Source Nodes: [sums], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# sums => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_7, %mul_12, %mul_17, %mul_22],), kwargs = {})
triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 2.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr1 + ((-4) + x0), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp14 * tmp6
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp13, tmp15, tmp16)
tmp18 = tmp0 >= tmp11
tmp19 = tl.full([1], 12, tl.int64)
tmp20 = tmp0 < tmp19
tmp21 = tmp18 & tmp20
tmp22 = tl.load(in_ptr2 + ((-8) + x0), tmp21 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 * tmp6
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp0 >= tmp19
tmp27 = tl.full([1], 16, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tl.load(in_ptr3 + ((-12) + x0), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp30 = tmp29 * tmp6
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp26, tmp30, tmp31)
tmp33 = tl.where(tmp21, tmp25, tmp32)
tmp34 = tl.where(tmp13, tmp17, tmp33)
tmp35 = tl.where(tmp4, tmp9, tmp34)
tl.store(out_ptr0 + (x0), tmp35, xmask)
''', device_str='cuda')
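# The stack kernel above concatenates the four length-4 per-row vectors into a
# flat 16-element buffer and applies the deferred factor of 2 -- i.e. it
# materializes torch.stack(sums) from the source module.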
# kernel path: runs/run_shard_4/inductor_cache/sy/csyklj4uv2s23rzfh7b6nk4ezlhjwpn4xxwzmfyehwsr2vie47pe.py
# Topologically Sorted Source Nodes: [setitem, mean], Original ATen: [aten.lift_fresh, aten.index_put, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%view, [%isnan], %full_default), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%index_put,), kwargs = {})
triton_per_fused_index_put_lift_fresh_mean_2 = async_compile.triton('triton_per_fused_index_put_lift_fresh_mean_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_lift_fresh_mean_2', 'mutated_arg_names': ['in_out_ptr0', 'in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_lift_fresh_mean_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tl.where(tmp1, tmp2, tmp0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 16.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp3, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
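# The kernel above implements the tail of forward(): NaNs (e.g. from a 0/0 in
# the preceding division) are replaced with 0.0, and the 16 surviving values
# are averaged -- sums[torch.isnan(sums)] = 0.0 followed by sums.mean().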
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf5 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf7 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [a, mul_2, sub_1, mul_3, exp_1, add_1, b, dot, add_2, sum_1, div, a_1, mul_5, sub_2, mul_6, exp_2, add_3, b_1, dot_1, add_4, sum_2, div_1, a_2, mul_8, sub_3, mul_9, exp_3, add_5, b_2, dot_2, add_6, sum_3, div_2, a_3, mul_11, sub_4, mul_12, exp_4, add_7, b_3, dot_3, add_8, sum_4, div_3], Original ATen: [aten.minimum, aten.mul, aten.sub, aten.exp, aten.add, aten.reciprocal, aten.dot, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_dot_exp_minimum_mul_reciprocal_sub_sum_0.run(arg0_1, arg1_1, buf1, buf3, buf5, buf7, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
buf8 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [sums], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf1, buf3, buf5, buf7, buf8, 16, grid=grid(16), stream=stream0)
del buf1
del buf3
del buf5
del buf7
buf11 = empty_strided_cuda((), (), torch.float32)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [setitem, mean], Original ATen: [aten.lift_fresh, aten.index_put, aten.mean]
triton_per_fused_index_put_lift_fresh_mean_2.run(buf12, buf8, buf8, 1, 16, grid=grid(1), stream=stream0)
del buf8
return (buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.hub
class MaskLoss(nn.Module):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(MaskLoss, self).__init__()
self.reduction = reduction
def forward(self, input, target):
        N, _W = input.size()
        values, _ = torch.max(target, 0)   # column-wise max of the target
        sums = []
        for n in range(N):
            value = values[n]
            tar = target[n]
            inp = input[n]
            a = torch.min(inp, tar)
            # Soft threshold, i.e. sigmoid(100 * (tar - 0.55 * value)).
            b = 1 / (1 + torch.exp(-100 * (tar - 0.55 * value)))
            # Note: `target` (the full N x W matrix) broadcasts against the row
            # `inp`, so each appended term is a length-N vector, not a scalar.
            sums.append(2 * torch.div(torch.dot(a, b), torch.sum(inp +
                target, axis=-1)))
sums = torch.stack(sums)
sums[torch.isnan(sums)] = 0.0
return sums.mean()
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.hub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_dot_exp_minimum_mul_reciprocal_sub_sum_0(in_ptr0,
in_ptr1, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (12 + r0), None)
tmp1 = tl.load(in_ptr1 + (12 + r0), None)
tmp3 = tl.load(in_ptr1 + 3)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5 = tl.load(in_ptr1 + 7)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.load(in_ptr1 + 11)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.load(in_ptr1 + 15)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp29 = tl.load(in_ptr0 + (8 + r0), None)
tmp30 = tl.load(in_ptr1 + (8 + r0), None)
tmp32 = tl.load(in_ptr1 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp34 = tl.load(in_ptr1 + 6)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = tl.load(in_ptr1 + 10)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp40 = tl.load(in_ptr1 + 14)
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp54 = tl.load(in_ptr0 + (4 + r0), None)
tmp55 = tl.load(in_ptr1 + (4 + r0), None)
tmp57 = tl.load(in_ptr1 + 1)
tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK])
tmp59 = tl.load(in_ptr1 + 5)
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp62 = tl.load(in_ptr1 + 9)
tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK])
tmp65 = tl.load(in_ptr1 + 13)
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp79 = tl.load(in_ptr0 + r0, None)
tmp80 = tl.load(in_ptr1 + r0, None)
tmp82 = tl.load(in_ptr1 + 0)
tmp83 = tl.broadcast_to(tmp82, [XBLOCK, RBLOCK])
tmp84 = tl.load(in_ptr1 + 4)
tmp85 = tl.broadcast_to(tmp84, [XBLOCK, RBLOCK])
tmp87 = tl.load(in_ptr1 + 8)
tmp88 = tl.broadcast_to(tmp87, [XBLOCK, RBLOCK])
tmp90 = tl.load(in_ptr1 + 12)
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp104 = tl.load(in_ptr0 + 0)
tmp105 = tl.broadcast_to(tmp104, [XBLOCK, RBLOCK])
tmp106 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr0 + 1)
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp110 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp113 = tl.load(in_ptr0 + 2)
tmp114 = tl.broadcast_to(tmp113, [XBLOCK, RBLOCK])
tmp115 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp118 = tl.load(in_ptr0 + 3)
tmp119 = tl.broadcast_to(tmp118, [XBLOCK, RBLOCK])
tmp120 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp124 = tl.load(in_ptr0 + 4)
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = tl.load(in_ptr0 + 5)
tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK])
tmp131 = tl.load(in_ptr0 + 6)
tmp132 = tl.broadcast_to(tmp131, [XBLOCK, RBLOCK])
tmp135 = tl.load(in_ptr0 + 7)
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp140 = tl.load(in_ptr0 + 8)
tmp141 = tl.broadcast_to(tmp140, [XBLOCK, RBLOCK])
tmp143 = tl.load(in_ptr0 + 9)
tmp144 = tl.broadcast_to(tmp143, [XBLOCK, RBLOCK])
tmp147 = tl.load(in_ptr0 + 10)
tmp148 = tl.broadcast_to(tmp147, [XBLOCK, RBLOCK])
tmp151 = tl.load(in_ptr0 + 11)
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp156 = tl.load(in_ptr0 + 12)
tmp157 = tl.broadcast_to(tmp156, [XBLOCK, RBLOCK])
tmp159 = tl.load(in_ptr0 + 13)
tmp160 = tl.broadcast_to(tmp159, [XBLOCK, RBLOCK])
tmp163 = tl.load(in_ptr0 + 14)
tmp164 = tl.broadcast_to(tmp163, [XBLOCK, RBLOCK])
tmp167 = tl.load(in_ptr0 + 15)
tmp168 = tl.broadcast_to(tmp167, [XBLOCK, RBLOCK])
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.55
tmp15 = tmp13 * tmp14
tmp16 = tmp1 - tmp15
tmp17 = -100.0
tmp18 = tmp16 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = 1.0
tmp21 = tmp19 + tmp20
tmp22 = tl.full([1, 1], 1, tl.int32)
tmp23 = tmp22 / tmp21
tmp24 = tmp23 * tmp20
tmp25 = tmp2 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = tl.sum(tmp26, 1)[:, None]
tmp31 = triton_helpers.minimum(tmp29, tmp30)
tmp36 = triton_helpers.maximum(tmp33, tmp35)
tmp39 = triton_helpers.maximum(tmp36, tmp38)
tmp42 = triton_helpers.maximum(tmp39, tmp41)
tmp43 = tmp42 * tmp14
tmp44 = tmp30 - tmp43
tmp45 = tmp44 * tmp17
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp46 + tmp20
tmp48 = tmp22 / tmp47
tmp49 = tmp48 * tmp20
tmp50 = tmp31 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = tl.sum(tmp51, 1)[:, None]
tmp56 = triton_helpers.minimum(tmp54, tmp55)
tmp61 = triton_helpers.maximum(tmp58, tmp60)
tmp64 = triton_helpers.maximum(tmp61, tmp63)
tmp67 = triton_helpers.maximum(tmp64, tmp66)
tmp68 = tmp67 * tmp14
tmp69 = tmp55 - tmp68
tmp70 = tmp69 * tmp17
tmp71 = tl_math.exp(tmp70)
tmp72 = tmp71 + tmp20
tmp73 = tmp22 / tmp72
tmp74 = tmp73 * tmp20
tmp75 = tmp56 * tmp74
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp81 = triton_helpers.minimum(tmp79, tmp80)
tmp86 = triton_helpers.maximum(tmp83, tmp85)
tmp89 = triton_helpers.maximum(tmp86, tmp88)
tmp92 = triton_helpers.maximum(tmp89, tmp91)
tmp93 = tmp92 * tmp14
tmp94 = tmp80 - tmp93
tmp95 = tmp94 * tmp17
tmp96 = tl_math.exp(tmp95)
tmp97 = tmp96 + tmp20
tmp98 = tmp22 / tmp97
tmp99 = tmp98 * tmp20
tmp100 = tmp81 * tmp99
tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK])
tmp103 = tl.sum(tmp101, 1)[:, None]
tmp107 = tmp105 + tmp106
tmp111 = tmp109 + tmp110
tmp112 = tmp107 + tmp111
tmp116 = tmp114 + tmp115
tmp117 = tmp112 + tmp116
tmp121 = tmp119 + tmp120
tmp122 = tmp117 + tmp121
tmp123 = tmp103 / tmp122
tmp126 = tmp125 + tmp106
tmp129 = tmp128 + tmp110
tmp130 = tmp126 + tmp129
tmp133 = tmp132 + tmp115
tmp134 = tmp130 + tmp133
tmp137 = tmp136 + tmp120
tmp138 = tmp134 + tmp137
tmp139 = tmp78 / tmp138
tmp142 = tmp141 + tmp106
tmp145 = tmp144 + tmp110
tmp146 = tmp142 + tmp145
tmp149 = tmp148 + tmp115
tmp150 = tmp146 + tmp149
tmp153 = tmp152 + tmp120
tmp154 = tmp150 + tmp153
tmp155 = tmp53 / tmp154
tmp158 = tmp157 + tmp106
tmp161 = tmp160 + tmp110
tmp162 = tmp158 + tmp161
tmp165 = tmp164 + tmp115
tmp166 = tmp162 + tmp165
tmp169 = tmp168 + tmp120
tmp170 = tmp166 + tmp169
tmp171 = tmp28 / tmp170
tl.store(out_ptr4 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp123, None)
tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp139, None)
tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp155, None)
tl.store(out_ptr7 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp171, None)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 2.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr1 + (-4 + x0), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp14 * tmp6
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp13, tmp15, tmp16)
tmp18 = tmp0 >= tmp11
tmp19 = tl.full([1], 12, tl.int64)
tmp20 = tmp0 < tmp19
tmp21 = tmp18 & tmp20
tmp22 = tl.load(in_ptr2 + (-8 + x0), tmp21 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp22 * tmp6
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp0 >= tmp19
tl.full([1], 16, tl.int64)
tmp29 = tl.load(in_ptr3 + (-12 + x0), tmp26 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp29 * tmp6
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp26, tmp30, tmp31)
tmp33 = tl.where(tmp21, tmp25, tmp32)
tmp34 = tl.where(tmp13, tmp17, tmp33)
tmp35 = tl.where(tmp4, tmp9, tmp34)
tl.store(out_ptr0 + x0, tmp35, xmask)
@triton.jit
def triton_per_fused_index_put_lift_fresh_mean_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tl.where(tmp1, tmp2, tmp0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 16.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp3, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
buf7 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_dot_exp_minimum_mul_reciprocal_sub_sum_0[grid
(1)](arg0_1, arg1_1, buf1, buf3, buf5, buf7, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf8 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_stack_1[grid(16)](buf1, buf3, buf5, buf7, buf8, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf3
del buf5
del buf7
buf11 = empty_strided_cuda((), (), torch.float32)
buf12 = buf11
del buf11
triton_per_fused_index_put_lift_fresh_mean_2[grid(1)](buf12, buf8,
buf8, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf8
return buf12,
class MaskLossNew(nn.Module):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(MaskLossNew, self).__init__()
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
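# Illustrative usage (CUDA assumed; shapes are fixed to 4x4 by the asserts in
# call() above):
#
#   crit = MaskLossNew()
#   pred = torch.rand(4, 4, device='cuda')
#   mask = torch.rand(4, 4, device='cuda')
#   loss = crit(pred, mask)   # 0-dim tensor, mean of 16 per-element terms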
| lisadunlap/explainable-nbdt | MaskLoss | false | 7,110 | [
"MIT"
] | 1 | e045bfd0b55b21fd87c9a233b73a0ca77672efff | https://github.com/lisadunlap/explainable-nbdt/tree/e045bfd0b55b21fd87c9a233b73a0ca77672efff | import torch
import torch.nn as nn
import torch.hub
class Model(nn.Module):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super().__init__()
self.reduction = reduction
def forward(self, input, target):
        N, _W = input.size()
        values, _ = torch.max(target, 0)   # column-wise max of the target
        sums = []
        for n in range(N):
            value = values[n]
            tar = target[n]
            inp = input[n]
            a = torch.min(inp, tar)
            # Soft threshold, i.e. sigmoid(100 * (tar - 0.55 * value)).
            b = 1 / (1 + torch.exp(-100 * (tar - 0.55 * value)))
            # Note: `target` (the full N x W matrix) broadcasts against the row
            # `inp`, so each appended term is a length-N vector, not a scalar.
            sums.append(2 * torch.div(torch.dot(a, b), torch.sum(inp +
                target, axis=-1)))
sums = torch.stack(sums)
sums[torch.isnan(sums)] = 0.0
return sums.mean()
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
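# Hedged usage sketch (added; the `_demo` names are hypothetical). Per row n
# the eager Model computes 2 * dot(min(inp, tar), sigmoid(100 * (tar - 0.55 *
# values[n]))) normalized by sum(inp + target, axis=-1) (note that `target`
# broadcasts here), zeroes any NaN entries, and returns the mean.
_demo = Model()
_demo_loss = _demo(torch.rand([4, 4]), torch.rand([4, 4]))
assert _demo_loss.dim() == 0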
|
rbbox_corners_aligned | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pi/cpib2r3a57ou2tmjyho4d4hbocvarplps4zjw6e7lgbij7hb5e5p.py
# Topologically Sorted Source Nodes: [corners, mul, setitem, mul_1, setitem_1, mul_2, setitem_2, mul_3, setitem_3], Original ATen: [aten.zeros, aten.mul, aten.copy]
# Source node to ATen node mapping:
# corners => full_default
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# setitem => copy
# setitem_1 => copy_1
# setitem_2 => copy_2
# setitem_3 => copy_3
# Graph fragment:
# %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 2, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, -0.5), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_5, %mul), kwargs = {})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int, %copy, 1, 0), kwargs = {})
# %select_scatter_default_1 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %select_scatter_default, 1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, -0.5), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_12, %mul_1), kwargs = {})
# %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_1, %copy_1, 1, 0), kwargs = {})
# %select_scatter_default_3 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %select_scatter_default_2, 1, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, -0.5), kwargs = {})
# %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_19, %mul_2), kwargs = {})
# %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_2, %copy_2, 1, 1), kwargs = {})
# %select_scatter_default_5 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %select_scatter_default_4, 1, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, 0.5), kwargs = {})
# %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_26, %mul_3), kwargs = {})
# %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_3, %copy_3, 1, 1), kwargs = {})
# %select_scatter_default_7 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %select_scatter_default_6, 1, 1), kwargs = {})
triton_poi_fused_copy_mul_zeros_0 = async_compile.triton('triton_poi_fused_copy_mul_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_mul_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_mul_zeros_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 2
x0 = xindex % 4
x2 = (xindex // 8)
x4 = xindex
tmp5 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tmp3 == tmp1
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp1 == tmp8
tmp11 = -0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp8 == tmp1
tmp14 = tmp3 == tmp8
tmp15 = tmp5 * tmp11
tmp16 = 0.0
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.where(tmp9, tmp17, tmp16)
tmp19 = tl.where(tmp14, tmp15, tmp18)
tmp20 = tmp8 == tmp8
tmp21 = tl.where(tmp20, tmp17, tmp16)
tmp22 = tl.where(tmp13, tmp19, tmp21)
tmp23 = tl.where(tmp4, tmp12, tmp22)
tmp24 = tmp1 == tmp1
tmp25 = tl.where(tmp24, tmp19, tmp18)
tmp26 = tl.where(tmp9, tmp23, tmp25)
tmp27 = tl.where(tmp4, tmp7, tmp26)
tmp28 = tmp0 == tmp8
tmp29 = tl.where(tmp28, tmp17, tmp16)
tmp30 = tl.where(tmp2, tmp19, tmp29)
tmp31 = tl.where(tmp28, tmp23, tmp30)
tmp32 = tl.where(tmp2, tmp27, tmp31)
tl.store(out_ptr0 + (x4), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3f/c3fspsb4wnftpv7srev64wz5drcwh4znlaa4ozu34bydv7fgf3ec.py
# Topologically Sorted Source Nodes: [mul_4, setitem_4, mul_5, setitem_5], Original ATen: [aten.mul, aten.copy]
# Source node to ATen node mapping:
# mul_4 => mul_4
# mul_5 => mul_5
# setitem_4 => copy_4
# setitem_5 => copy_5
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, 0.5), kwargs = {})
# %copy_4 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_33, %mul_4), kwargs = {})
# %select_scatter_default_8 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_4, %copy_4, 1, 2), kwargs = {})
# %select_scatter_default_9 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_7, %select_scatter_default_8, 1, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, 0.5), kwargs = {})
# %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_40, %mul_5), kwargs = {})
# %select_scatter_default_10 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_5, %copy_5, 1, 2), kwargs = {})
# %select_scatter_default_11 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_9, %select_scatter_default_10, 1, 1), kwargs = {})
triton_poi_fused_copy_mul_1 = async_compile.triton('triton_poi_fused_copy_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 2
x0 = xindex % 4
x2 = (xindex // 8)
x4 = xindex
tmp6 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (4 + x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x4), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tl.full([1], 2, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = tmp1 == tmp9
tmp12 = tmp11 * tmp7
tmp14 = tl.where(tmp5, tmp12, tmp13)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp5, tmp8, tmp16)
tmp18 = tmp0 == tmp9
tmp20 = tl.where(tmp18, tmp14, tmp19)
tmp21 = tl.where(tmp2, tmp17, tmp20)
tl.store(out_ptr0 + (x4), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iu/ciu3cvwvhzjanyxd65syrjj5yuao3cl5kkzw6xnwp4sbsybfljct.py
# Topologically Sorted Source Nodes: [mul_6, setitem_6, mul_7, setitem_7, cat, add], Original ATen: [aten.mul, aten.copy, aten.cat, aten.add]
# Source node to ATen node mapping:
# add => add
# cat => cat
# mul_6 => mul_6
# mul_7 => mul_7
# setitem_6 => copy_6
# setitem_7 => copy_7
# Graph fragment:
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, 0.5), kwargs = {})
# %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_47, %mul_6), kwargs = {})
# %select_scatter_default_12 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_6, %copy_6, 1, 3), kwargs = {})
# %select_scatter_default_13 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_11, %select_scatter_default_12, 1, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, -0.5), kwargs = {})
# %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_54, %mul_7), kwargs = {})
# %select_scatter_default_14 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_7, %copy_7, 1, 3), kwargs = {})
# %select_scatter_default_15 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_13, %select_scatter_default_14, 1, 1), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %unsqueeze_3], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_scatter_default_15, %cat), kwargs = {})
triton_poi_fused_add_cat_copy_mul_2 = async_compile.triton('triton_poi_fused_add_cat_copy_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_copy_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_copy_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 2
x0 = xindex % 4
x2 = (xindex // 8)
x4 = xindex
tmp6 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (4 + x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (x4), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = -0.5
tmp8 = tmp6 * tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = tmp1 == tmp9
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp15 = tl.where(tmp5, tmp13, tmp14)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp5, tmp8, tmp17)
tmp19 = tmp0 == tmp9
tmp21 = tl.where(tmp19, tmp15, tmp20)
tmp22 = tl.where(tmp2, tmp18, tmp21)
tmp23 = tl.full([1], 0, tl.int64)
tmp24 = tmp0 >= tmp23
tmp25 = tl.full([1], 1, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tl.load(in_ptr0 + (4*x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp0 >= tmp25
tmp29 = tl.full([1], 2, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tl.load(in_ptr0 + (1 + (4*x2)), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tl.where(tmp26, tmp27, tmp31)
tmp33 = tmp22 + tmp32
tl.store(out_ptr0 + (x4), tmp33, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [corners, mul, setitem, mul_1, setitem_1, mul_2, setitem_2, mul_3, setitem_3], Original ATen: [aten.zeros, aten.mul, aten.copy]
stream0 = get_raw_stream(0)
triton_poi_fused_copy_mul_zeros_0.run(arg0_1, buf0, 32, grid=grid(32), stream=stream0)
buf1 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_4, setitem_4, mul_5, setitem_5], Original ATen: [aten.mul, aten.copy]
triton_poi_fused_copy_mul_1.run(arg0_1, buf0, buf1, 32, grid=grid(32), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul_6, setitem_6, mul_7, setitem_7, cat, add], Original ATen: [aten.mul, aten.copy, aten.cat, aten.add]
triton_poi_fused_add_cat_copy_mul_2.run(arg0_1, buf1, buf2, 32, grid=grid(32), stream=stream0)
del arg0_1
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class rbbox_corners_aligned(nn.Module):
    def __init__(self, gboxes=None):
        super(rbbox_corners_aligned, self).__init__()
self.corners_gboxes = gboxes
return
def forward(ctx, gboxes):
"""
        No rotation is performed here, as the axes are aligned.
^ [y]
1 --------- 2
/ / --->
0 -------- 3 [x]
        Each node has the coordinate [x, y], corresponding to the order of the input.
Output: [N, 2, 4]
[[x_0, x_1, x_2, x_3],
[y_0, y_1, y_2, y_3]].
"""
N = gboxes.shape[0]
center_x = gboxes[:, 0]
center_y = gboxes[:, 1]
x_d = gboxes[:, 2]
y_d = gboxes[:, 3]
corners = torch.zeros([N, 2, 4], device=gboxes.device, dtype=torch.
float32)
corners[:, 0, 0] = x_d.mul(-0.5)
corners[:, 1, 0] = y_d.mul(-0.5)
corners[:, 0, 1] = x_d.mul(-0.5)
corners[:, 1, 1] = y_d.mul(0.5)
corners[:, 0, 2] = x_d.mul(0.5)
corners[:, 1, 2] = y_d.mul(0.5)
corners[:, 0, 3] = x_d.mul(0.5)
corners[:, 1, 3] = y_d.mul(-0.5)
b = center_x.unsqueeze(1).repeat(1, 4).unsqueeze(1)
c = center_y.unsqueeze(1).repeat(1, 4).unsqueeze(1)
return corners + torch.cat((b, c), 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
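# Hedged check (added; values chosen for illustration): for a box
# (cx, cy, xd, yd) the aligned corners are the centre plus the four
# half-extent offsets, in the order shown in the docstring above.
_box = torch.tensor([[1.0, 2.0, 4.0, 2.0]])
_corners = rbbox_corners_aligned()(_box)
assert torch.allclose(_corners[0, 0], torch.tensor([-1.0, -1.0, 3.0, 3.0]))
assert torch.allclose(_corners[0, 1], torch.tensor([1.0, 3.0, 3.0, 1.0]))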
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_copy_mul_zeros_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 2
x0 = xindex % 4
x2 = xindex // 8
x4 = xindex
tmp5 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tmp3 == tmp1
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp1 == tmp8
tmp11 = -0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp8 == tmp1
tmp14 = tmp3 == tmp8
tmp15 = tmp5 * tmp11
tmp16 = 0.0
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.where(tmp9, tmp17, tmp16)
tmp19 = tl.where(tmp14, tmp15, tmp18)
tmp20 = tmp8 == tmp8
tmp21 = tl.where(tmp20, tmp17, tmp16)
tmp22 = tl.where(tmp13, tmp19, tmp21)
tmp23 = tl.where(tmp4, tmp12, tmp22)
tmp24 = tmp1 == tmp1
tmp25 = tl.where(tmp24, tmp19, tmp18)
tmp26 = tl.where(tmp9, tmp23, tmp25)
tmp27 = tl.where(tmp4, tmp7, tmp26)
tmp28 = tmp0 == tmp8
tmp29 = tl.where(tmp28, tmp17, tmp16)
tmp30 = tl.where(tmp2, tmp19, tmp29)
tmp31 = tl.where(tmp28, tmp23, tmp30)
tmp32 = tl.where(tmp2, tmp27, tmp31)
tl.store(out_ptr0 + x4, tmp32, xmask)
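# Hedged note (added): the tl.where ladder above folds the first four
# select_scatter ops into a single pass, writing corners 0 and 1 of the
# zero-initialized [4, 2, 4] buffer (x = -0.5 * xd for both; y = -0.5 * yd
# and +0.5 * yd).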
@triton.jit
def triton_poi_fused_copy_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 2
x0 = xindex % 4
x2 = xindex // 8
x4 = xindex
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (4 + x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + x4, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tl.full([1], 2, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = tmp1 == tmp9
tmp12 = tmp11 * tmp7
tmp14 = tl.where(tmp5, tmp12, tmp13)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp5, tmp8, tmp16)
tmp18 = tmp0 == tmp9
tmp20 = tl.where(tmp18, tmp14, tmp19)
tmp21 = tl.where(tmp2, tmp17, tmp20)
tl.store(out_ptr0 + x4, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_cat_copy_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 2
x0 = xindex % 4
x2 = xindex // 8
x4 = xindex
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (4 + x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + x4, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tl.full([1], 3, tl.int32)
tmp5 = tmp3 == tmp4
tmp7 = -0.5
tmp8 = tmp6 * tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = tmp1 == tmp9
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp15 = tl.where(tmp5, tmp13, tmp14)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp5, tmp8, tmp17)
tmp19 = tmp0 == tmp9
tmp21 = tl.where(tmp19, tmp15, tmp20)
tmp22 = tl.where(tmp2, tmp18, tmp21)
tl.full([1], 0, tl.int64)
tmp25 = tl.full([1], 1, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tl.load(in_ptr0 + 4 * x2, tmp26 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp0 >= tmp25
tl.full([1], 2, tl.int64)
tmp31 = tl.load(in_ptr0 + (1 + 4 * x2), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tl.where(tmp26, tmp27, tmp31)
tmp33 = tmp22 + tmp32
tl.store(out_ptr0 + x4, tmp33, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_mul_zeros_0[grid(32)](arg0_1, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
triton_poi_fused_copy_mul_1[grid(32)](arg0_1, buf0, buf1, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_add_cat_copy_mul_2[grid(32)](arg0_1, buf1, buf2,
32, XBLOCK=32, num_warps=1, num_stages=1)
del arg0_1
del buf1
return buf2,
class rbbox_corners_alignedNew(nn.Module):
    def __init__(self, gboxes=None):
        super(rbbox_corners_alignedNew, self).__init__()
self.corners_gboxes = gboxes
return
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| liuhuaijjin/rpn_rois_proposals_layers | rbbox_corners_aligned | false | 7,111 | [
"MIT"
] | 1 | c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | https://github.com/liuhuaijjin/rpn_rois_proposals_layers/tree/c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | import torch
import torch.nn as nn
class Model(nn.Module):
    def __init__(self, gboxes=None):
        super().__init__()
self.corners_gboxes = gboxes
return
def forward(ctx, gboxes):
"""
        No rotation is performed here, as the axes are aligned.
^ [y]
1 --------- 2
/ / --->
0 -------- 3 [x]
        Each node has the coordinate [x, y], corresponding to the order of the input.
Output: [N, 2, 4]
[[x_0, x_1, x_2, x_3],
[y_0, y_1, y_2, y_3]].
"""
N = gboxes.shape[0]
center_x = gboxes[:, 0]
center_y = gboxes[:, 1]
x_d = gboxes[:, 2]
y_d = gboxes[:, 3]
corners = torch.zeros([N, 2, 4], device=gboxes.device, dtype=torch.
float32)
corners[:, 0, 0] = x_d.mul(-0.5)
corners[:, 1, 0] = y_d.mul(-0.5)
corners[:, 0, 1] = x_d.mul(-0.5)
corners[:, 1, 1] = y_d.mul(0.5)
corners[:, 0, 2] = x_d.mul(0.5)
corners[:, 1, 2] = y_d.mul(0.5)
corners[:, 0, 3] = x_d.mul(0.5)
corners[:, 1, 3] = y_d.mul(-0.5)
b = center_x.unsqueeze(1).repeat(1, 4).unsqueeze(1)
c = center_y.unsqueeze(1).repeat(1, 4).unsqueeze(1)
return corners + torch.cat((b, c), 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
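# Hedged check (added; values chosen for illustration): b and c replicate the
# box centre across the four corners, so a box centred at the origin returns
# the raw half-extent offsets unchanged.
_o = Model()(torch.tensor([[0.0, 0.0, 2.0, 2.0]]))
assert torch.allclose(_o[0], torch.tensor([[-1.0, -1.0, 1.0, 1.0],
                                           [-1.0, 1.0, 1.0, -1.0]]))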
|
SpatialAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/46/c46mg7rvdztu6n5oosf5c4if7ziag6obrxhwbn43lcdfibfuom7w.py
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# scale => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %getitem], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6q/c6qyrmvchep2lyeodxjgze7brt2fv4khvsx2os2smplvfajckxaz.py
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 3, 3), (18, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
# Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(primals_1, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_2, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 3, 3), (18, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=3):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
scale = torch.cat([avg_out, max_out], dim=1)
scale = self.conv(scale)
return x * self.sigmoid(scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
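# Hedged shape check (added): the gate is a single-channel [N, 1, H, W] map
# built from the channel-wise mean and max, broadcast-multiplied back onto
# the input, so the output shape matches the input.
_sa = SpatialAttention(kernel_size=3)
_x = torch.rand([4, 4, 4, 4])
assert _sa(_x).shape == _x.shape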
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
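# Hedged note (added): lanes with x1 == 0 average the four input channels and
# lanes with x1 == 1 take their running maximum, so this kernel materializes
# torch.cat([mean, max], dim=1) without intermediate buffers.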
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 3, 3), (18, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](primals_1, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1
class SpatialAttentionNew(nn.Module):
def __init__(self, kernel_size=3):
super(SpatialAttentionNew, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| ljjyxz123/CenterMask | SpatialAttention | false | 7,112 | [
"BSD-2-Clause"
] | 1 | 443eebde30e209eeb3b953f7ef35d3f7f14aaca5 | https://github.com/ljjyxz123/CenterMask/tree/443eebde30e209eeb3b953f7ef35d3f7f14aaca5 | import torch
import torch.utils.data
from torch import nn
class Model(nn.Module):
def __init__(self, kernel_size=3):
super().__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
scale = torch.cat([avg_out, max_out], dim=1)
scale = self.conv(scale)
return x * self.sigmoid(scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
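# Hedged equivalence sketch (added): the fused cat kernel in the compiled
# path corresponds to this eager two-liner.
_x = torch.rand([4, 4, 4, 4])
_scale = torch.cat([_x.mean(1, keepdim=True), _x.max(1, keepdim=True).values], 1)
assert _scale.shape == (4, 2, 4, 4)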
|
coRNNCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nc/cnc6a3vkphurm472zdavmn3qnff4lmaezxs63jlllw2kks2e62a4.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lo/clojvten7dzuavcb5hvvy3hpwk3wb7evis7nrr2wadmad7bq3vr5.py
# Topologically Sorted Source Nodes: [tanh, mul, sub, mul_1, sub_1, mul_2, hz, mul_3, hy], Original ATen: [aten.tanh, aten.mul, aten.sub, aten.add]
# Source node to ATen node mapping:
# hy => add_1
# hz => add
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# sub => sub
# sub_1 => sub_1
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%addmm,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%tanh, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 4), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 4), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {})
triton_poi_fused_add_mul_sub_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sub_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sub_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sub_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = libdevice.tanh(tmp1)
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 - tmp5
tmp7 = tmp0 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp0 + tmp9
tmp11 = tmp10 * tmp4
tmp12 = tmp3 + tmp11
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, mul, sub, mul_1, sub_1, mul_2, hz, mul_3, hy], Original ATen: [aten.tanh, aten.mul, aten.sub, aten.add]
triton_poi_fused_add_mul_sub_tanh_1.run(primals_2, buf1, primals_3, buf2, buf3, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
return (buf3, buf2, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.utils
class coRNNCell(nn.Module):
def __init__(self, n_inp, n_hid, dt, gamma, epsilon):
super(coRNNCell, self).__init__()
self.dt = dt
self.gamma = gamma
self.epsilon = epsilon
self.i2h = nn.Linear(n_inp + n_hid + n_hid, n_hid)
def forward(self, x, hy, hz):
hz = hz + self.dt * (torch.tanh(self.i2h(torch.cat((x, hz, hy), 1))
) - self.gamma * hy - self.epsilon * hz)
hy = hy + self.dt * hz
return hy, hz
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_inp': 4, 'n_hid': 4, 'dt': 4, 'gamma': 4, 'epsilon': 4}]
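# Hedged reference (added; the `_cell` names are hypothetical): the cell is
# one explicit-Euler step of the coupled-oscillator ODE
#   hz' = tanh(W [x, hz, hy] + b) - gamma * hy - epsilon * hz
#   hy' = hz
# so each forward call advances (hy, hz) by dt along these dynamics.
_cell = coRNNCell(n_inp=4, n_hid=4, dt=0.1, gamma=1.0, epsilon=1.0)
_hy1, _hz1 = _cell(torch.rand(4, 4), torch.zeros(4, 4), torch.zeros(4, 4))
assert _hy1.shape == _hz1.shape == (4, 4)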
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_mul_sub_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp2 = libdevice.tanh(tmp1)
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 - tmp5
tmp7 = tmp0 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp0 + tmp9
tmp11 = tmp10 * tmp4
tmp12 = tmp3 + tmp11
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
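# Hedged note (added): with dt = gamma = epsilon = 4 folded in as the literal
# 4.0, this kernel fuses hz_new = hz + dt * (tanh(i2h) - gamma * hy -
# epsilon * hz) (out_ptr0) and hy_new = hy + dt * hz_new (out_ptr1) into one
# elementwise pass; in_ptr0 holds hz and in_ptr2 holds hy.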
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3,
buf0, 48, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sub_tanh_1[grid(16)](primals_2, buf1,
primals_3, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
del primals_3
return buf3, buf2, buf0, buf1
class coRNNCellNew(nn.Module):
def __init__(self, n_inp, n_hid, dt, gamma, epsilon):
super(coRNNCellNew, self).__init__()
self.dt = dt
self.gamma = gamma
self.epsilon = epsilon
self.i2h = nn.Linear(n_inp + n_hid + n_hid, n_hid)
def forward(self, input_0, input_1, input_2):
primals_4 = self.i2h.weight
primals_5 = self.i2h.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| lkampoli/coRNN | coRNNCell | false | 7,113 | [
"MIT"
] | 1 | c9c2edfebab289f3053eb48030f273e4b977a187 | https://github.com/lkampoli/coRNN/tree/c9c2edfebab289f3053eb48030f273e4b977a187 | import torch
from torch import nn
import torch.nn.utils
class Model(nn.Module):
def __init__(self, n_inp, n_hid, dt, gamma, epsilon):
super().__init__()
self.dt = dt
self.gamma = gamma
self.epsilon = epsilon
self.i2h = nn.Linear(n_inp + n_hid + n_hid, n_hid)
def forward(self, x, hy, hz):
hz = hz + self.dt * (torch.tanh(self.i2h(torch.cat((x, hz, hy), 1))
) - self.gamma * hy - self.epsilon * hz)
hy = hy + self.dt * hz
return hy, hz
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4, 4, 4]
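# Hedged rollout sketch (added; `_cell` is hypothetical): iterating the cell
# with the positional init args (n_inp, n_hid, dt, gamma, epsilon) integrates
# the oscillator dynamics over several steps.
_cell = Model(4, 4, 0.1, 1.0, 1.0)
_hy = torch.zeros(4, 4)
_hz = torch.zeros(4, 4)
for _ in range(3):
    _hy, _hz = _cell(torch.rand(4, 4), _hy, _hz)
assert _hy.shape == (4, 4)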
|
CatCombine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
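# Reader annotation: x0 = xindex % 8 is the column of the concatenated last
# dimension; columns 0-3 read from in_ptr0 and columns 4-7 read from in_ptr1 at
# offset x0 - 4, with the tmp4/tmp6 predicates masking the out-of-range load.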
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
class CatCombine(nn.Module):
def __init__(self, C):
super(CatCombine, self).__init__()
self.compress = nn.Linear(C * 2, C)
def forward(self, x, y):
return self.compress(torch.cat((x, y), dim=-1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'C': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)  # no-op: lower-bound check whose result is unused
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)  # no-op: upper-bound check whose result is unused
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0)
class CatCombineNew(nn.Module):
def __init__(self, C):
super(CatCombineNew, self).__init__()
self.compress = nn.Linear(C * 2, C)
def forward(self, input_0, input_1):
primals_3 = self.compress.weight
primals_4 = self.compress.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| lorylei/DARTS-et | CatCombine | false | 7,114 | [
"Apache-2.0"
] | 1 | f22cfd53c14afd6ba602b8ecfbff9cdf77fc2ff8 | https://github.com/lorylei/DARTS-et/tree/f22cfd53c14afd6ba602b8ecfbff9cdf77fc2ff8 | import torch
import torch.nn as nn
import torch.utils
class Model(nn.Module):
def __init__(self, C):
super().__init__()
self.compress = nn.Linear(C * 2, C)
def forward(self, x, y):
return self.compress(torch.cat((x, y), dim=-1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
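A short sketch (not from the source repo) of why the compiled graph can fold the concatenation into a single addmm: a Linear applied to cat(x, y) equals the sum of two matmuls against the split halves of its weight matrix.

import torch
from torch import nn

C = 4
compress = nn.Linear(C * 2, C)
x, y = torch.rand(4, 4, 4, C), torch.rand(4, 4, 4, C)
out = compress(torch.cat((x, y), dim=-1))
W1, W2 = compress.weight[:, :C], compress.weight[:, C:]  # split the (C, 2C) weight
out_split = x @ W1.T + y @ W2.T + compress.bias
print(torch.allclose(out, out_split, atol=1e-6))  # True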
FBANKNormalizer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kg/ckgkkwtkw5zyiamt7prgpgrh2tdnroejhxpxpbr4whhobwysvwgg.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# out => add
# out_1 => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %unsqueeze), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %unsqueeze_1), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
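# Reader annotation: x0 = xindex % 4 recovers the mel-bin index from the
# flattened element index, so the 4-element bias (in_ptr1) and weight
# (in_ptr2) vectors are loaded with 'evict_last' and each value is reused 64
# times across the 256 output elements.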
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.utils.data
class FBANKNormalizer(torch.nn.Module):
def __init__(self, config):
super(FBANKNormalizer, self).__init__()
self.num_mel_bins = config.num_mel_bins
self.weight = torch.nn.Parameter(torch.tensor([1 / 10] * self.
num_mel_bins))
self.bias = torch.nn.Parameter(torch.tensor([0.0] * self.num_mel_bins))
def forward(self, fbank):
out = fbank + self.bias.unsqueeze(0)
out = out * self.weight.unsqueeze(0)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(num_mel_bins=4)}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2, primals_3
class FBANKNormalizerNew(torch.nn.Module):
def __init__(self, config):
super(FBANKNormalizerNew, self).__init__()
self.num_mel_bins = config.num_mel_bins
self.weight = torch.nn.Parameter(torch.tensor([1 / 10] * self.
num_mel_bins))
self.bias = torch.nn.Parameter(torch.tensor([0.0] * self.num_mel_bins))
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lorenlugosch/autoregressive-models | FBANKNormalizer | false | 7,115 | [
"Apache-2.0"
] | 1 | 2c50bc331d3b68cc7144f7456591bbc2321cc658 | https://github.com/lorenlugosch/autoregressive-models/tree/2c50bc331d3b68cc7144f7456591bbc2321cc658 | from _paritybench_helpers import _mock_config
import torch
import torch.utils.data
class Model(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.num_mel_bins = config.num_mel_bins
self.weight = torch.nn.Parameter(torch.tensor([1 / 10] * self.
num_mel_bins))
self.bias = torch.nn.Parameter(torch.tensor([0.0] * self.num_mel_bins))
def forward(self, fbank):
out = fbank + self.bias.unsqueeze(0)
out = out * self.weight.unsqueeze(0)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
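A minimal sketch of the per-mel-bin affine transform that the fused add/mul kernel of this record computes; num_mel_bins=4 mirrors the mocked config, and the 1/10 scale matches the module's parameter initialization.

import torch

num_mel_bins = 4
weight = torch.full((num_mel_bins,), 1 / 10)
bias = torch.zeros(num_mel_bins)
fbank = torch.rand(4, 4, 4, num_mel_bins)
out = (fbank + bias.unsqueeze(0)) * weight.unsqueeze(0)  # broadcasts over the leading dims
print(out.shape)  # torch.Size([4, 4, 4, 4])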
CNNLayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sq/csqrftqwwlhlb6o2kvwtb7kxokd5iwdxonje4wrris4m67cqpxjp.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.clone, aten.native_layer_norm]
# Source node to ATen node mapping:
# x => clone
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_clone_native_layer_norm_0 = async_compile.triton('triton_poi_fused_clone_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp23, xmask)
''', device_str='cuda')
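# Reader annotation: for each of the 64 (batch, channel, width) columns this
# kernel reduces the four stride-4 elements of the transposed feature axis and
# stores the mean plus rsqrt(var + 1e-05); the second kernel below consumes
# both to apply the affine LayerNorm.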
# kernel path: runs/run_shard_4/inductor_cache/y7/cy7g6tmlgs4d3glwyuw2dxorndk2fxlo2fwdaj6azuxk74enr5vn.py
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.clone, aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
return (buf2, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CNNLayerNorm(nn.Module):
"""Layer normalization built for cnns input"""
def __init__(self, n_feats: 'int'):
super(CNNLayerNorm, self).__init__()
self.layer_norm = nn.LayerNorm(n_feats)
def forward(self, x: 'torch.Tensor') -> torch.Tensor:
x = x.transpose(2, 3).contiguous()
x = self.layer_norm(x)
return x.transpose(2, 3).contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_feats': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_native_layer_norm_0[grid(64)](primals_1,
buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_1, buf0, buf1,
primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return buf2, primals_1
class CNNLayerNormNew(nn.Module):
"""Layer normalization built for cnns input"""
def __init__(self, n_feats: 'int'):
super(CNNLayerNormNew, self).__init__()
self.layer_norm = nn.LayerNorm(n_feats)
def forward(self, input_0):
primals_2 = self.layer_norm.weight
primals_3 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| loopdigga96/numbers_recognition | CNNLayerNorm | false | 7,116 | [
"Apache-2.0"
] | 1 | dd1110d3fd18b5ca20278a010c550aeaad495e19 | https://github.com/loopdigga96/numbers_recognition/tree/dd1110d3fd18b5ca20278a010c550aeaad495e19 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Layer normalization built for cnns input"""
def __init__(self, n_feats: 'int'):
super().__init__()
self.layer_norm = nn.LayerNorm(n_feats)
def forward(self, x: 'torch.Tensor') -> torch.Tensor:
x = x.transpose(2, 3).contiguous()
x = self.layer_norm(x)
return x.transpose(2, 3).contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
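A sketch verifying what the transpose/LayerNorm/transpose pattern of this record computes: the statistics are taken over dim 2 of the original layout, and at initialization (weight=1, bias=0) the module matches the manual normalization below.

import torch
from torch import nn

n_feats = 4
ln = nn.LayerNorm(n_feats)
x = torch.rand(4, 4, n_feats, 4)
y = ln(x.transpose(2, 3)).transpose(2, 3)
mu = x.mean(dim=2, keepdim=True)
var = x.var(dim=2, unbiased=False, keepdim=True)  # biased variance, as in LayerNorm
y_manual = (x - mu) / torch.sqrt(var + ln.eps)
print(torch.allclose(y, y_manual, atol=1e-5))  # True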
CausalPad | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/to/ctok5xpzvzzspgzbb4ppw5exunijjjynrq6wq7yp5kvmjrbkbcn5.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [0, 0, 1, 0], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 5
x2 = (xindex // 20)
x3 = xindex % 20
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + ((-4) + x3 + (16*x2)), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp3, xmask)
''', device_str='cuda')
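# Reader annotation: tmp2 = (-1 + x1) >= 0 is false exactly on the new leading
# row, and the masked load then falls back to other=0.0, so the zero padding
# comes for free rather than needing a separate memset of the output buffer.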
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 320, grid=grid(320), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
class CausalPad(torch.nn.Module):
def __init__(self):
super(CausalPad, self).__init__()
def forward(self, input):
return torch.nn.functional.pad(input, (0, 0, 1, 0))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x2 = xindex // 20
x3 = xindex % 20
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(320)](arg0_1, buf0, 320,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CausalPadNew(torch.nn.Module):
def __init__(self):
super(CausalPadNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| lorenlugosch/autoregressive-models | CausalPad | false | 7,117 | [
"Apache-2.0"
] | 1 | 2c50bc331d3b68cc7144f7456591bbc2321cc658 | https://github.com/lorenlugosch/autoregressive-models/tree/2c50bc331d3b68cc7144f7456591bbc2321cc658 | import torch
import torch.utils.data
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return torch.nn.functional.pad(input, (0, 0, 1, 0))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
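A small sketch of what pad=(0, 0, 1, 0) does in this record: it prepends one zero row along the second-to-last dimension, the standard trick for making a convolution causal along that axis.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
y = F.pad(x, (0, 0, 1, 0))
print(y.shape)  # torch.Size([4, 4, 5, 4])
print(torch.equal(y[:, :, 0], torch.zeros(4, 4, 4)))  # True: inserted row is zero
print(torch.equal(y[:, :, 1:], x))  # True: original rows shifted down by one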
_Residual_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tc/ctchh66224xunisoqylba7qzpsl47ose6e5qu7bcgrxigfwirvf4.py
# Topologically Sorted Source Nodes: [instance_norm, output], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# instance_norm => add, add_1, mul, mul_1, repeat, rsqrt, sub, var_mean
# output => gt, mul_2, where
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_2), kwargs = {})
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0 = async_compile.triton('triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = triton_helpers.welford_reduce(
tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(
tmp3_mean, tmp3_m2, tmp3_weight, 1
)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp3, xmask)
tmp15 = tl.load(in_ptr2 + (x0 % 64), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.2
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(in_out_ptr0 + (r1 + (4096*x0)), tmp21, rmask & xmask)
tmp22 = 4096.0
tmp23 = tmp4 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tl.store(out_ptr3 + (x0), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/st/cstrjrus2xtc6elawqi42w5evvdehzcp4d5fjmxji4eznl5szfll.py
# Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# output_1 => add_2, repeat_2, rsqrt_1, var_mean_1
# output_2 => add_4
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_6, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %primals_1), kwargs = {})
triton_red_fused__native_batch_norm_legit_add_repeat_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_add_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_add_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = triton_helpers.welford_reduce(
tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(
tmp3_mean, tmp3_m2, tmp3_weight, 1
)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp3, xmask)
x2 = xindex % 64
tmp15 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp17 = tl.load(in_ptr3 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr3 + (r1 + (4096*x0)), tmp18, rmask & xmask)
tmp19 = 4096.0
tmp20 = tmp4 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr4 + (x0), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, ), (1, ))
assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (64, ), (1, ))
assert_size_stride(primals_7, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf6 = empty_strided_cuda((1, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1), 0); del buf6 # reuse
buf5 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm, output], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0.run(buf7, primals_3, buf0, primals_4, buf1, buf2, buf5, 256, 4096, grid=grid(256), stream=stream0)
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf10 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf14 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf13 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_red_fused__native_batch_norm_legit_add_repeat_1.run(primals_6, buf8, primals_7, primals_1, buf9, buf10, buf14, buf13, 256, 4096, grid=grid(256), stream=stream0)
del primals_6
del primals_7
return (buf14, primals_1, primals_2, primals_5, buf0, buf1, reinterpret_tensor(buf5, (256, ), (1, ), 0), buf7, buf8, buf9, reinterpret_tensor(buf13, (256, ), (1, ), 0), reinterpret_tensor(buf10, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 256, 1, 1), (256, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class _Residual_Block(nn.Module):
def __init__(self):
super(_Residual_Block, self).__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, x):
identity_data = x
output = self.relu(self.in1(self.conv1(x)))
output = self.in2(self.conv2(output))
output = torch.add(output, identity_data)
return output
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
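# Sketch (reader annotation; a smaller spatial size is used purely for
# illustration): InstanceNorm2d normalizes each (batch, channel) plane
# independently, which is what the Welford loops in the fused kernels compute
# over the 64x64 plane before applying the batch-repeated affine parameters.
import torch
from torch import nn

x = torch.rand(2, 64, 8, 8)
inorm = nn.InstanceNorm2d(64, affine=True)
y = inorm(x)
mu = x.mean(dim=(2, 3), keepdim=True)
var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
y_manual = (x - mu) / torch.sqrt(var + inorm.eps)
y_manual = y_manual * inorm.weight.view(1, -1, 1, 1) + inorm.bias.view(1, -1, 1, 1)
print(torch.allclose(y, y_manual, atol=1e-5))  # True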
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = (triton_helpers.
welford_reduce(tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5_tmp[:, None]  # no-op: Welford weight output, unused here
tl.store(out_ptr1 + x0, tmp3, xmask)
tmp15 = tl.load(in_ptr2 + x0 % 64, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.2
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(in_out_ptr0 + (r1 + 4096 * x0), tmp21, rmask & xmask)
tmp22 = 4096.0
tmp23 = tmp4 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tl.store(out_ptr3 + x0, tmp26, xmask)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = (triton_helpers.
welford_reduce(tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5_tmp[:, None]  # no-op: Welford weight output, unused here
tl.store(out_ptr1 + x0, tmp3, xmask)
x2 = xindex % 64
tmp15 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp17 = tl.load(in_ptr3 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr3 + (r1 + 4096 * x0), tmp18, rmask & xmask)
tmp19 = 4096.0
tmp20 = tmp4 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr4 + x0, tmp23, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64,), (1,))
assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (64,), (1,))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((256,), (1,), torch.float32)
buf2 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch
.float32)
buf6 = empty_strided_cuda((1, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 64, 64, 64), (262144, 4096, 64,
1), 0)
del buf6
buf5 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch
.float32)
get_raw_stream(0)
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_0[grid(256)
](buf7, primals_3, buf0, primals_4, buf1, buf2, buf5, 256, 4096,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_3
del primals_4
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = empty_strided_cuda((256,), (1,), torch.float32)
buf10 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf14 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf13 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_red_fused__native_batch_norm_legit_add_repeat_1[grid(256)](
primals_6, buf8, primals_7, primals_1, buf9, buf10, buf14,
buf13, 256, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
del primals_6
del primals_7
return (buf14, primals_1, primals_2, primals_5, buf0, buf1,
reinterpret_tensor(buf5, (256,), (1,), 0), buf7, buf8, buf9,
reinterpret_tensor(buf13, (256,), (1,), 0), reinterpret_tensor(
buf10, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf2,
(1, 256, 1, 1), (256, 1, 1, 1), 0))
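def _fused_graph_sketch(x, conv1_w, in1_w, in1_b, conv2_w, in2_w, in2_b):
    # Hedged eager-mode reference for what call() computes (an illustrative
    # helper, not part of the generated module): kernel 0 fuses instance
    # norm + LeakyReLU(0.2) after the first convolution, kernel 1 fuses
    # instance norm + the residual add after the second convolution.
    import torch.nn.functional as F
    h = F.leaky_relu(F.instance_norm(F.conv2d(x, conv1_w, padding=1),
        weight=in1_w, bias=in1_b), 0.2)
    return F.instance_norm(F.conv2d(h, conv2_w, padding=1), weight=in2_w,
        bias=in2_b) + x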
class _Residual_BlockNew(nn.Module):
def __init__(self):
super(_Residual_BlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.in1.weight
primals_4 = self.in1.bias
primals_5 = self.conv2.weight
primals_6 = self.in2.weight
primals_7 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| liruilong940607/SRResnet | _Residual_Block | false | 7,118 | [
"MIT"
] | 1 | 928b1c076bfa051dffd5165ea966af5dfd9c372d | https://github.com/liruilong940607/SRResnet/tree/928b1c076bfa051dffd5165ea966af5dfd9c372d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(64, affine=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(64, affine=True)
def forward(self, x):
identity_data = x
output = self.relu(self.in1(self.conv1(x)))
output = self.in2(self.conv2(output))
output = torch.add(output, identity_data)
return output
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return []
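def _residual_block_example():
    # Hedged usage sketch (illustrative, not part of the original repo):
    # the block maps (N, 64, H, W) -> (N, 64, H, W), so instances can be
    # stacked freely.
    m = Model()
    x = torch.rand([2, 64, 8, 8])
    y = m(x)
    assert y.shape == x.shape
    return y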
|
FC_Layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ck/cckzkicqhrcfrkfdju5exjvt5jnqvtxaazswol3wnjnjike2p45t.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# input_2 => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_3, 4, grid=grid(4), stream=stream0)
del primals_3
return (buf1, primals_1, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def standardize(param, assert_length):
if type(param) is not list and type(param) is not tuple:
param = [param] * assert_length
    assert len(param) == assert_length, (
        'expected %s input params, got %s' % (assert_length, len(param)))
return param
def fc_layer(input, layer_size, bias=True, name=None,
        activation=nn.Sigmoid(), batch_norm=None, dropout=0):
    layer_size = ([input] + [layer_size] if type(layer_size) is not list
        else [input] + layer_size)
assert_length = len(layer_size) - 1
bias = standardize(bias, assert_length)
activation = standardize(activation, assert_length)
batch_norm = standardize(batch_norm, assert_length)
dropout = standardize(dropout, assert_length)
if name is None:
name = ''
modules = nn.Sequential()
for i in range(len(layer_size) - 1):
modules.add_module(name + '_fc_' + str(i), nn.Linear(layer_size[i],
layer_size[i + 1], bias[i]))
if batch_norm[i]:
modules.add_module(name + 'bn_' + str(i), batch_norm[i](
layer_size[i + 1]))
if activation[i]:
modules.add_module(name + 'act_' + str(i), activation[i])
if dropout[i] > 0:
modules.add_module(name + 'drop_' + str(i), nn.Dropout2d(
dropout[i]))
return modules
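def _fc_layer_example():
    # Hedged usage sketch (illustrative, not part of the original repo):
    # scalars are broadcast across layers by standardize(); per-layer lists
    # override that.
    mlp = fc_layer(128, [64, 10], activation=[nn.ReLU(), None], dropout=0.5)
    # -> Linear(128, 64), ReLU, Dropout2d(0.5), Linear(64, 10), Dropout2d(0.5)
    assert len(mlp) == 5
    return mlp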
class FC_Layer(nn.Module):
def __init__(self, input, layer_size, bias=True, name=None, activation=
nn.Sigmoid(), batch_norm=None, dropout=0):
super().__init__()
self.fc_layer = fc_layer(input, layer_size, bias=bias, name=name,
activation=activation, batch_norm=batch_norm, dropout=dropout)
def forward(self, x, batch_dim=0):
if len(x.shape):
x = x.view(x.size(batch_dim), -1)
return self.fc_layer.forward(x)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input': 4, 'layer_size': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(4)](buf1, primals_3, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
return buf1, primals_1, buf1
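def _sigmoid_epilogue_sketch(x, weight, bias):
    # Hedged reference for what call() computes (an illustrative helper, not
    # part of the generated module): the matmul runs via extern_kernels.mm
    # and the bias add plus sigmoid is fused into triton_poi_fused_sigmoid_0,
    # i.e. a plain Linear -> Sigmoid.
    return torch.sigmoid(x @ weight.t() + bias)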
def standardize(param, assert_length):
if type(param) is not list and type(param) is not tuple:
param = [param] * assert_length
    assert len(param) == assert_length, (
        'expected %s input params, got %s' % (assert_length, len(param)))
return param
def fc_layer(input, layer_size, bias=True, name=None,
        activation=nn.Sigmoid(), batch_norm=None, dropout=0):
    layer_size = ([input] + [layer_size] if type(layer_size) is not list
        else [input] + layer_size)
assert_length = len(layer_size) - 1
bias = standardize(bias, assert_length)
activation = standardize(activation, assert_length)
batch_norm = standardize(batch_norm, assert_length)
dropout = standardize(dropout, assert_length)
if name is None:
name = ''
modules = nn.Sequential()
for i in range(len(layer_size) - 1):
modules.add_module(name + '_fc_' + str(i), nn.Linear(layer_size[i],
layer_size[i + 1], bias[i]))
if batch_norm[i]:
modules.add_module(name + 'bn_' + str(i), batch_norm[i](
layer_size[i + 1]))
if activation[i]:
modules.add_module(name + 'act_' + str(i), activation[i])
if dropout[i] > 0:
modules.add_module(name + 'drop_' + str(i), nn.Dropout2d(
dropout[i]))
return modules
class FC_LayerNew(nn.Module):
def __init__(self, input, layer_size, bias=True, name=None, activation=
nn.Sigmoid(), batch_norm=None, dropout=0):
super().__init__()
self.fc_layer = fc_layer(input, layer_size, bias=bias, name=name,
activation=activation, batch_norm=batch_norm, dropout=dropout)
def forward(self, input_0):
primals_2 = self.fc_layer._fc_0.weight
primals_3 = self.fc_layer._fc_0.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| loveorchids/omni_torch | FC_Layer | false | 7,119 | [
"Apache-2.0"
] | 1 | 9bd654387619c0cbc6aee9e91482ecc9200138ef | https://github.com/loveorchids/omni_torch/tree/9bd654387619c0cbc6aee9e91482ecc9200138ef | import torch
import torch.nn as nn
def standardize(param, assert_length):
if type(param) is not list and type(param) is not tuple:
param = [param] * assert_length
    assert len(param) == assert_length, (
        'expected %s input params, got %s' % (assert_length, len(param)))
return param
def fc_layer(input, layer_size, bias=True, name=None,
        activation=nn.Sigmoid(), batch_norm=None, dropout=0):
    layer_size = ([input] + [layer_size] if type(layer_size) is not list
        else [input] + layer_size)
assert_length = len(layer_size) - 1
bias = standardize(bias, assert_length)
activation = standardize(activation, assert_length)
batch_norm = standardize(batch_norm, assert_length)
dropout = standardize(dropout, assert_length)
if name is None:
name = ''
modules = nn.Sequential()
for i in range(len(layer_size) - 1):
modules.add_module(name + '_fc_' + str(i), nn.Linear(layer_size[i],
layer_size[i + 1], bias[i]))
if batch_norm[i]:
modules.add_module(name + 'bn_' + str(i), batch_norm[i](
layer_size[i + 1]))
if activation[i]:
modules.add_module(name + 'act_' + str(i), activation[i])
if dropout[i] > 0:
modules.add_module(name + 'drop_' + str(i), nn.Dropout2d(
dropout[i]))
return modules
class Model(nn.Module):
def __init__(self, input, layer_size, bias=True, name=None, activation=
nn.Sigmoid(), batch_norm=None, dropout=0):
super().__init__()
self.fc_layer = fc_layer(input, layer_size, bias=bias, name=name,
activation=activation, batch_norm=batch_norm, dropout=dropout)
def forward(self, x, batch_dim=0):
        if len(x.shape):  # true for any non-scalar input; flatten to (batch, -1)
x = x.view(x.size(batch_dim), -1)
return self.fc_layer.forward(x)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 1]
|
Conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sn/csngmasb7xqzu32pyy3hbq5jedqbr4vi5bssrudzhxtdswrdhcqz.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%permute, [2, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 8], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = (-2) + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-8) + y0 + (4*x2) + (16*y1)), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x2 + (8*y3)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yl/cylfgllvrtc2se3m75q5dqdhxbivuwmtmb6muivbg6ax7phdkdxq.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_2 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 5) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 16, 8, grid=grid(16, 8), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5), (20, 5, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 80, grid=grid(80), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.utils
import torch.utils.checkpoint
class Conv(torch.nn.Module):
def __init__(self, in_dim, out_dim, filter_length, stride):
super(Conv, self).__init__()
self.conv = torch.nn.Conv1d(in_channels=in_dim, out_channels=
out_dim, kernel_size=filter_length, stride=stride)
self.filter_length = filter_length
def forward(self, x):
out = x.transpose(1, 2)
left_padding = int(self.filter_length / 2)
right_padding = int(self.filter_length / 2)
out = torch.nn.functional.pad(out, (left_padding, right_padding))
out = self.conv(out)
out = out.transpose(1, 2)
return out
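def _conv_length_sketch(L=4, k=4, s=1):
    # Hedged worked example (illustrative, not part of the original repo):
    # with symmetric padding of k // 2 on each side,
    #     L_out = (L + 2 * (k // 2) - k) // s + 1,
    # so odd k gives "same" length while even k overshoots by one; for
    # L=4, k=4, s=1 this is 5, matching the (4, 5, 4) output asserted by
    # the compiled graph above.
    return (L + 2 * (k // 2) - k) // s + 1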
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'filter_length': 4, 'stride': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.utils
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = -2 + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-8 + y0 + 4 * x2 + 16 * y1), tmp5 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x2 + 8 * y3), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 5 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(16, 8)](primals_1, buf0, 16,
8, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5), (20, 5, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(80)](buf2, primals_3, 80,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), primals_2, buf0
class ConvNew(torch.nn.Module):
def __init__(self, in_dim, out_dim, filter_length, stride):
super(ConvNew, self).__init__()
self.conv = torch.nn.Conv1d(in_channels=in_dim, out_channels=
out_dim, kernel_size=filter_length, stride=stride)
self.filter_length = filter_length
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lorenlugosch/graves-transducers | Conv | false | 7,120 | [
"Apache-2.0"
] | 1 | 489f46d58eba35d34163bb8b887c31d6e043c990 | https://github.com/lorenlugosch/graves-transducers/tree/489f46d58eba35d34163bb8b887c31d6e043c990 | import torch
import torch.utils.data
import torch.utils
import torch.utils.checkpoint
class Model(torch.nn.Module):
def __init__(self, in_dim, out_dim, filter_length, stride):
super().__init__()
self.conv = torch.nn.Conv1d(in_channels=in_dim, out_channels=
out_dim, kernel_size=filter_length, stride=stride)
self.filter_length = filter_length
def forward(self, x):
out = x.transpose(1, 2)
left_padding = int(self.filter_length / 2)
right_padding = int(self.filter_length / 2)
out = torch.nn.functional.pad(out, (left_padding, right_padding))
out = self.conv(out)
out = out.transpose(1, 2)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 1]
|
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sr/csrzlteph4svc746shxzwrfzfygp3ngujwxcrnvcusqhc43dtftf.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# y => add, clone, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kr/ckrgsxswvgegsbqfoto5m7jeyj5kla75z75anayv7klydrtg2kle.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# y => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.LayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
"""Layer Norm."""
super(LayerNorm, self).__init__(normalized_shape, eps=eps,
elementwise_affine=elementwise_affine)
def forward(self, x):
x = x.permute(0, 2, 1)
y = super(LayerNorm, self).forward(x)
y = y.permute(0, 2, 1)
return y
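def _layernorm_permute_sketch(x):
    # Hedged equivalence check (illustrative, not part of the original repo):
    # for a (B, C, T) tensor, the permute / LayerNorm / permute round trip
    # normalizes each time step over the channel dimension.
    import torch.nn.functional as F
    ln = LayerNorm(x.size(1))
    ref = F.layer_norm(x.permute(0, 2, 1), (x.size(1),), ln.weight,
        ln.bias, ln.eps).permute(0, 2, 1)
    return torch.allclose(ln(x), ref, atol=1e-06)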
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'normalized_shape': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16, 4)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, 4, XBLOCK=4, YBLOCK=8,
num_warps=1, num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_1
class LayerNormNew(nn.LayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
"""Layer Norm."""
super(LayerNormNew, self).__init__(normalized_shape, eps=eps,
elementwise_affine=elementwise_affine)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lorinczb/pytorch-dc-tts | LayerNorm | false | 7,121 | [
"MIT"
] | 1 | 9dae50678113e2f60ad0752b99b959bb0b11dfc9 | https://github.com/lorinczb/pytorch-dc-tts/tree/9dae50678113e2f60ad0752b99b959bb0b11dfc9 | import torch
import torch.nn as nn
class Model(nn.LayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
"""Layer Norm."""
super().__init__(normalized_shape, eps=eps,
elementwise_affine=elementwise_affine)
def forward(self, x):
x = x.permute(0, 2, 1)
y = super(LayerNorm, self).forward(x)
y = y.permute(0, 2, 1)
return y
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
mbr_convex_hull | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kh/ckhtqfawq2kdlfjxi7ht5pfiebeork7wrcmpyo755duufkdrsqhm.py
# Topologically Sorted Source Nodes: [a, b], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# a => cat
# b => cat_1
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cos, %cos_1], 1), kwargs = {})
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cos_2, %cos_3], 1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (12 + (8*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (4 + (8*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = -tmp6
tmp8 = tmp5 + tmp7
tmp9 = tl.load(in_ptr0 + (8 + (8*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr0 + ((8*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = -tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.atan2(tmp8, tmp12)
tmp14 = 1.5707963
tmp15 = libdevice.fmod(tmp13, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = tl_math.cos(tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp4, tmp17, tmp18)
tmp20 = tmp0 >= tmp3
tmp21 = tl.full([1], 8, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr0 + (12 + (8*x1) + ((-4) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr0 + (4 + (8*x1) + ((-4) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = -tmp24
tmp26 = tmp23 + tmp25
tmp27 = tl.load(in_ptr0 + (8 + (8*x1) + ((-4) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr0 + ((8*x1) + ((-4) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = -tmp28
tmp30 = tmp27 + tmp29
tmp31 = libdevice.atan2(tmp26, tmp30)
tmp32 = libdevice.fmod(tmp31, tmp14)
tmp33 = tl_math.abs(tmp32)
tmp34 = tmp33 - tmp14
tmp35 = tl_math.cos(tmp34)
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp20, tmp35, tmp36)
tmp38 = tl.where(tmp4, tmp19, tmp37)
tmp39 = tmp16 + tmp14
tmp40 = tl_math.cos(tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp4, tmp40, tmp41)
tmp43 = tl_math.cos(tmp33)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp20, tmp43, tmp44)
tmp46 = tl.where(tmp4, tmp42, tmp45)
tl.store(out_ptr0 + (x2), tmp38, xmask)
tl.store(out_ptr1 + (x2), tmp46, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q4/cq4m4vhr5g5n2fs4y4nu64bh5neuox6dalmxljp6fzzrnfl56umx.py
# Topologically Sorted Source Nodes: [R_tensor], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# R_tensor => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 2
x0 = xindex % 8
x2 = (xindex // 16)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (8*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (8*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ju/cjuomzxntkde2voilgp5hg4xn7wi5ku73hgurst4ze6bfozlugyt.py
# Topologically Sorted Source Nodes: [rot_points], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# rot_points => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 2
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (8*x1)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ju/cjucyy734rzs3nnc55tq6fjkyiudqyv5dccouryil7yjdqun37oj.py
# Topologically Sorted Source Nodes: [sub_1, sub_2, areas, min_2], Original ATen: [aten.sub, aten.mul, aten.min]
# Source node to ATen node mapping:
# areas => mul
# min_2 => min_2
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_4, %select_5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %sub_2), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.default](args = (%mul,), kwargs = {})
triton_per_fused_min_mul_sub_3 = async_compile.triton('triton_per_fused_min_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_min_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_min_mul_sub_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 4
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (8 + r0 + (16*r1)), rmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (12 + r0 + (16*r1)), rmask, other=0.0)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = tmp2 - tmp3
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = triton_helpers.minimum(tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tmp4 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(rmask, tmp11, float("inf"))
tmp14 = triton_helpers.min2(tmp13, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
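# Reference sketch (assumption: a plain-PyTorch reading of the fused graph
# fragment above, not part of the generated module). The kernel realizes the
# tail of mbr_convex_hull.forward, roughly:
#     extent_x = max_x[:, 0] - min_x[:, 0]
#     extent_y = max_x[:, 1] - min_x[:, 1]
#     out = (extent_x * extent_y).min()
# i.e. candidate rectangle areas followed by a single min reduction; masked
# lanes are padded with float('inf') so they cannot win the minimum.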
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 2, 4), (8, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((3, 8), (8, 1), torch.float32)
buf1 = empty_strided_cuda((3, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [a, b], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg0_1, buf0, buf1, 24, grid=grid(24), stream=stream0)
buf2 = empty_strided_cuda((3, 2, 2, 4), (16, 8, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [R_tensor], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf0, buf1, buf2, 48, grid=grid(48), stream=stream0)
del buf0
del buf1
buf3 = empty_strided_cuda((3, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [rot_points], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(arg0_1, buf3, 96, grid=grid(96), stream=stream0)
del arg0_1
buf4 = empty_strided_cuda((6, 2, 4), (8, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [rot_points], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (6, 2, 4), (8, 4, 1), 0), reinterpret_tensor(buf3, (6, 4, 4), (16, 4, 1), 0), out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub_1, sub_2, areas, min_2], Original ATen: [aten.sub, aten.mul, aten.min]
triton_per_fused_min_mul_sub_3.run(buf4, buf5, 1, 12, grid=grid(1), stream=stream0)
del buf4
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 2, 4), (8, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class mbr_convex_hull(nn.Module):
"""
    Minimum Bounding Rectangle (MBR)
    Algorithm core: the orientation of the MBR matches that of one of the edges of the point cloud's convex hull, which means
    the resulting rectangle must overlap with at least one of those edges.
"""
    def __init__(self, hull_points_2d=None):
        super(mbr_convex_hull, self).__init__()
        self.hull_points_2d = hull_points_2d
    def forward(self, hull_points_2d):
N = hull_points_2d.shape[0]
edges = hull_points_2d[1:N, :].add(-hull_points_2d[0:N - 1, :])
edge_angles = torch.atan2(edges[:, 1], edges[:, 0])
edge_angles = torch.fmod(edge_angles, 3.1415926 / 2.0)
edge_angles = torch.abs(edge_angles)
a = torch.stack((torch.cos(edge_angles), torch.cos(edge_angles -
3.1415926 / 2.0)), 1)
a = torch.unsqueeze(a, 1)
b = torch.stack((torch.cos(edge_angles + 3.1415926 / 2.0), torch.
cos(edge_angles)), 1)
b = torch.unsqueeze(b, 1)
R_tensor = torch.cat((a, b), 1)
hull_points_2d_ = torch.unsqueeze(torch.transpose(hull_points_2d, 0,
1), 0)
rot_points = R_tensor.matmul(hull_points_2d_)
min_x = torch.min(rot_points, 2)[0]
max_x = torch.max(rot_points, 2)[0]
areas = (max_x[:, 0] - min_x[:, 0]).mul(max_x[:, 1] - min_x[:, 1])
return torch.min(areas)
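# Usage sketch (illustrative assumption, not part of the original source):
# for an axis-aligned unit square given as a closed hull polyline, the
# minimum bounding rectangle is the square itself, so the returned area is
# approximately 1 (up to float rounding in the hard-coded pi constant):
#     hull = torch.tensor([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])
#     mbr_convex_hull()(hull)  # -> tensor close to 1.0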
def get_inputs():
return [torch.rand([4, 2, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (12 + 8 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (4 + 8 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = -tmp6
tmp8 = tmp5 + tmp7
tmp9 = tl.load(in_ptr0 + (8 + 8 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr0 + (8 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = -tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.atan2(tmp8, tmp12)
tmp14 = 1.5707963
tmp15 = libdevice.fmod(tmp13, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = tl_math.cos(tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp4, tmp17, tmp18)
tmp20 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp23 = tl.load(in_ptr0 + (12 + 8 * x1 + (-4 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr0 + (4 + 8 * x1 + (-4 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = -tmp24
tmp26 = tmp23 + tmp25
tmp27 = tl.load(in_ptr0 + (8 + 8 * x1 + (-4 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr0 + (8 * x1 + (-4 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp29 = -tmp28
tmp30 = tmp27 + tmp29
tmp31 = libdevice.atan2(tmp26, tmp30)
tmp32 = libdevice.fmod(tmp31, tmp14)
tmp33 = tl_math.abs(tmp32)
tmp34 = tmp33 - tmp14
tmp35 = tl_math.cos(tmp34)
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp20, tmp35, tmp36)
tmp38 = tl.where(tmp4, tmp19, tmp37)
tmp39 = tmp16 + tmp14
tmp40 = tl_math.cos(tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp4, tmp40, tmp41)
tmp43 = tl_math.cos(tmp33)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp20, tmp43, tmp44)
tmp46 = tl.where(tmp4, tmp42, tmp45)
tl.store(out_ptr0 + x2, tmp38, xmask)
tl.store(out_ptr1 + x2, tmp46, xmask)
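# Note (hedged reading, not part of the generated code): this kernel fuses
# the edge-angle math of mbr_convex_hull.forward. tmp14 = 1.5707963 is the
# hard-coded pi/2 from the source, so per hull edge it computes
#     theta = abs(fmod(atan2(dy, dx), pi / 2))
# and writes [cos(theta), cos(theta - pi/2)] into out_ptr0 and
# [cos(theta + pi/2), cos(theta)] into out_ptr1 -- the stacked `a` and `b`
# rows of R_tensor.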
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 2
x0 = xindex % 8
x2 = xindex // 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 8 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 8 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 2
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 8 * x1), xmask, eviction_policy
='evict_last')
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_per_fused_min_mul_sub_3(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 4
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), rmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), rmask, other=0.0)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = tmp2 - tmp3
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = triton_helpers.minimum(tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tmp4 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(rmask, tmp11, float('inf'))
tmp14 = triton_helpers.min2(tmp13, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 2, 4), (8, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((3, 8), (8, 1), torch.float32)
buf1 = empty_strided_cuda((3, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(24)](arg0_1, buf0, buf1, 24, XBLOCK=
32, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((3, 2, 2, 4), (16, 8, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(48)](buf0, buf1, buf2, 48, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((3, 2, 4, 4), (32, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(96)](arg0_1, buf3, 96, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((6, 2, 4), (8, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (6, 2, 4), (8, 4, 1), 0
), reinterpret_tensor(buf3, (6, 4, 4), (16, 4, 1), 0), out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_min_mul_sub_3[grid(1)](buf4, buf5, 1, 12, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
return buf5,
class mbr_convex_hullNew(nn.Module):
"""
    Minimum Bounding Rectangle (MBR)
    Algorithm core: the orientation of the MBR matches that of one of the edges of the point cloud's convex hull, which means
    the resulting rectangle must overlap with at least one of those edges.
"""
    def __init__(self, hull_points_2d=None):
        super(mbr_convex_hullNew, self).__init__()
        self.hull_points_2d = hull_points_2d
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| liuhuaijjin/rpn_rois_proposals_layers | mbr_convex_hull | false | 7,122 | [
"MIT"
] | 1 | c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | https://github.com/liuhuaijjin/rpn_rois_proposals_layers/tree/c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Minimum Bounding Rectangle (MBR)
    Algorithm core: the orientation of the MBR matches that of one of the edges of the point cloud's convex hull, which means
    the resulting rectangle must overlap with at least one of those edges.
"""
    def __init__(self, hull_points_2d=None):
        super(Model, self).__init__()
        self.hull_points_2d = hull_points_2d
    def forward(self, hull_points_2d):
N = hull_points_2d.shape[0]
edges = hull_points_2d[1:N, :].add(-hull_points_2d[0:N - 1, :])
edge_angles = torch.atan2(edges[:, 1], edges[:, 0])
edge_angles = torch.fmod(edge_angles, 3.1415926 / 2.0)
edge_angles = torch.abs(edge_angles)
a = torch.stack((torch.cos(edge_angles), torch.cos(edge_angles -
3.1415926 / 2.0)), 1)
a = torch.unsqueeze(a, 1)
b = torch.stack((torch.cos(edge_angles + 3.1415926 / 2.0), torch.
cos(edge_angles)), 1)
b = torch.unsqueeze(b, 1)
R_tensor = torch.cat((a, b), 1)
hull_points_2d_ = torch.unsqueeze(torch.transpose(hull_points_2d, 0,
1), 0)
rot_points = R_tensor.matmul(hull_points_2d_)
min_x = torch.min(rot_points, 2)[0]
max_x = torch.max(rot_points, 2)[0]
areas = (max_x[:, 0] - min_x[:, 0]).mul(max_x[:, 1] - min_x[:, 1])
return torch.min(areas)
def get_inputs():
return [torch.rand([4, 2, 4])]
def get_init_inputs():
return []
|
Joiner | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/f5/cf54npzh3x7q3gswk3vl4kgkcsrzzi5oyedpirpsa2u4uvx5dlpu.py
# Topologically Sorted Source Nodes: [combined, out], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# combined => add
# out => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x4 = (xindex // 64)
x3 = (xindex // 256)
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x6), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mu/cmuibskm4tre3kje5nrx7yahplyxjfujwc5gjjvks27pi6deadl4.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_1 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [3], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 5
x2 = (xindex // 20)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (5 + x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (10 + x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (15 + x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lr/clrzdnopw75bzv2n65xzhoq2vz5m2ywprfsnykpwoluhvt4v6qqb.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_1 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [3], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 5
x2 = (xindex // 20)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (5 + x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (10 + x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (15 + x0 + (20*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (5, 4), (4, 1))
assert_size_stride(primals_4, (5, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined, out], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(primals_1, primals_2, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((256, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 5), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 5), (320, 80, 20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf1, buf2, 1280, grid=grid(1280), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4, 5), (320, 80, 20, 5, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf2, buf3, 1280, grid=grid(1280), stream=stream0)
del buf2
return (buf3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.utils.data
import torch.utils
import torch.utils.checkpoint
class Joiner(torch.nn.Module):
def __init__(self, config):
super(Joiner, self).__init__()
self.tanh = torch.nn.Tanh()
self.num_outputs = config.num_tokens + 1
self.blank_index = 0
self.linear = torch.nn.Linear(config.num_joiner_hidden, self.
num_outputs)
def forward(self, encoder_out, decoder_out):
combined = encoder_out.unsqueeze(2) + decoder_out.unsqueeze(1)
out = self.tanh(combined)
out = self.linear(out).log_softmax(3)
return out
def forward_one_step(self, encoder_out, decoder_out):
combined = encoder_out + decoder_out
out = self.tanh(combined)
out = self.linear(out)
return out
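# Shape sketch (illustrative assumption; typical RNN-T usage rather than the
# 4-D paritybench inputs below): with encoder_out of shape (B, T, H) and
# decoder_out of shape (B, U, H), unsqueeze(2) and unsqueeze(1) broadcast the
# add to (B, T, U, H); the linear maps H -> num_tokens + 1 and log_softmax
# normalizes over that vocabulary axis:
#     joiner = Joiner(_mock_config(num_tokens=4, num_joiner_hidden=4))
#     enc = torch.rand(2, 7, 4)    # (B, T, H)
#     dec = torch.rand(2, 5, 4)    # (B, U, H)
#     joiner(enc, dec).shape       # torch.Size([2, 7, 5, 5])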
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(num_tokens=4, num_joiner_hidden=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.utils
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x4 = xindex // 64
x3 = xindex // 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x4), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + x6, tmp3, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 5
x2 = xindex // 20
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 5
x2 = xindex // 20
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
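# Note (hedged): the two kernels above are the standard numerically stable
# log_softmax split into two passes over dim 3 (size 4, fully unrolled as the
# four strided loads). A plain-PyTorch equivalent would be roughly:
#     x = torch.randn(4, 4, 4, 4, 5)
#     y = x - x.amax(dim=3, keepdim=True)              # kernel 1
#     y = y - y.exp().sum(dim=3, keepdim=True).log()   # kernel 2
#     torch.allclose(y, x.log_softmax(3))              # -> True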
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (5, 4), (4, 1))
assert_size_stride(primals_4, (5,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(1024)](primals_1, primals_2, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((256, 5), (5, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_3, (4, 5), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 5), (320, 80, 20, 5, 1),
torch.float32)
triton_poi_fused__log_softmax_1[grid(1280)](buf1, buf2, 1280,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4, 5), (320, 80, 20, 5, 1), 0
)
del buf1
triton_poi_fused__log_softmax_2[grid(1280)](buf2, buf3, 1280,
XBLOCK=256, num_warps=4, num_stages=1)
del buf2
return buf3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), buf3
class JoinerNew(torch.nn.Module):
def __init__(self, config):
super(JoinerNew, self).__init__()
self.tanh = torch.nn.Tanh()
self.num_outputs = config.num_tokens + 1
self.blank_index = 0
self.linear = torch.nn.Linear(config.num_joiner_hidden, self.
num_outputs)
def forward_one_step(self, encoder_out, decoder_out):
combined = encoder_out + decoder_out
out = self.tanh(combined)
out = self.linear(out)
return out
def forward(self, input_0, input_1):
primals_3 = self.linear.weight
primals_4 = self.linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| lorenlugosch/graves-transducers | Joiner | false | 7,123 | [
"Apache-2.0"
] | 1 | 489f46d58eba35d34163bb8b887c31d6e043c990 | https://github.com/lorenlugosch/graves-transducers/tree/489f46d58eba35d34163bb8b887c31d6e043c990 | from _paritybench_helpers import _mock_config
import torch
import torch.utils.data
import torch.utils
import torch.utils.checkpoint
class Model(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.tanh = torch.nn.Tanh()
self.num_outputs = config.num_tokens + 1
self.blank_index = 0
self.linear = torch.nn.Linear(config.num_joiner_hidden, self.
num_outputs)
def forward(self, encoder_out, decoder_out):
combined = encoder_out.unsqueeze(2) + decoder_out.unsqueeze(1)
out = self.tanh(combined)
out = self.linear(out).log_softmax(3)
return out
def forward_one_step(self, encoder_out, decoder_out):
combined = encoder_out + decoder_out
out = self.tanh(combined)
out = self.linear(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Upsample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xm/cxmk2kod6zgjturywionsuihaxqils4fvzrd7bziqpvptc3rgw43.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (48*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cr/ccri7cffjdxhdcglzfvzhu4wpte5acu3pxionui5nbusnbkkpvyl.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_1, %primals_7, %primals_8, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_8, 1024, grid=grid(1024), stream=stream0)
del primals_8
return (buf6, primals_3, primals_5, primals_7, buf0, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Upsample(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1,
stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=4, stride=2, padding=1)
def forward(self, upsampled, shortcut):
x = torch.cat([upsampled, shortcut], dim=1)
x = self.conv1x1(x)
x = self.conv3x3(x)
x = self.deconv(x)
return x
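# Shape sketch (illustrative, matching get_inputs below): the two inputs are
# concatenated along channels up to in_channels, refined by the 1x1 and 3x3
# convs, then upsampled 2x by the stride-2 transposed conv (kernel 4, pad 1):
#     up = Upsample(in_channels=4, out_channels=4)
#     y = up(torch.rand(4, 1, 4, 4), torch.rand(4, 3, 4, 4))
#     y.shape  # torch.Size([4, 4, 8, 8])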
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
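# Note (hedged reading): this kernel materializes torch.cat([upsampled,
# shortcut], dim=1) for a (4, 1, 4, 4) and a (4, 3, 4, 4) input. The channel
# index x1 selects the source: x1 < 1 reads in_ptr0, otherwise in_ptr1 at
# channel x1 - 1, with the masked-off lane defaulting to 0.0.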
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(1024)](buf6, primals_8, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
return buf6, primals_3, primals_5, primals_7, buf0, buf2, buf4
class UpsampleNew(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1,
stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=4, stride=2, padding=1)
def forward(self, input_0, input_1):
primals_3 = self.conv1x1.weight
primals_4 = self.conv1x1.bias
primals_5 = self.conv3x3.weight
primals_6 = self.conv3x3.bias
primals_7 = self.deconv.weight
primals_8 = self.deconv.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| loong8888/TextSnake.pytorch | Upsample | false | 7,124 | [
"MIT"
] | 1 | 49c24f71043c1895b91f8c7379995037fcc644f7 | https://github.com/loong8888/TextSnake.pytorch/tree/49c24f71043c1895b91f8c7379995037fcc644f7 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1,
stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=4, stride=2, padding=1)
def forward(self, upsampled, shortcut):
x = torch.cat([upsampled, shortcut], dim=1)
x = self.conv1x1(x)
x = self.conv3x3(x)
x = self.deconv(x)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [4, 4]
|
AR | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/t6/ct6f57cdvyh3ahq6iwyawuy7577bar2ftumjxqllolmn4c4lh7ph.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AR(nn.Module):
def __init__(self, window: 'int', hidden_size: 'int'):
super(AR, self).__init__()
self.linear = nn.Linear(window, hidden_size)
def forward(self, x):
x = torch.transpose(x, 1, 2)
x = self.linear(x)
x = torch.transpose(x, 1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'window': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
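    # Editorial note: the swapped stride multipliers below (4 * x2, 16 * x1)
    # realize torch.transpose(x, 1, 2) as a plain strided copy, so the GEMM
    # that follows can consume a contiguous (64, 4) view of the result.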
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class ARNew(nn.Module):
def __init__(self, window: 'int', hidden_size: 'int'):
super(ARNew, self).__init__()
self.linear = nn.Linear(window, hidden_size)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lucianolorenti/rul_pm | AR | false | 7125 | [

"MIT"
] | 1 | da9dfad79129dd47d24923cfd6c833869ef7b6a7 | https://github.com/lucianolorenti/rul_pm/tree/da9dfad79129dd47d24923cfd6c833869ef7b6a7 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, window: 'int', hidden_size: 'int'):
super().__init__()
self.linear = nn.Linear(window, hidden_size)
def forward(self, x):
x = torch.transpose(x, 1, 2)
x = self.linear(x)
x = torch.transpose(x, 1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
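# Editorial sketch, not part of the original record (`_demo_ar_shapes` is an
# illustrative name): for a 3D series of shape (batch, window, features) the
# two transposes route the Linear layer onto the window axis.
def _demo_ar_shapes():
    m = Model(4, 8)
    x = torch.rand(2, 4, 16)
    y = m(x)
    assert y.shape == (2, 8, 16)  # (batch, hidden_size, features)
    return y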
|
JS_Divergence | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sb/csbrhoo2n4y5iym2xea6cnh5zhokxttxk4s4a5zwcxw52tq3eveo.py
# Topologically Sorted Source Nodes: [kl_div, kl_div_1, add], Original ATen: [aten.xlogy, aten.mul, aten.sub, aten.mean, aten.add]
# Source node to ATen node mapping:
# add => add
# kl_div => eq, full_default, full_default_1, isnan, log, mean, mul, mul_1, sub, where, where_1
# kl_div_1 => eq_1, full_default_2, full_default_3, isnan_1, log_1, mean_1, mul_2, mul_3, sub_1, where_2, where_3
# Graph fragment:
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%arg0_1,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%arg1_1,), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %log_1), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_3), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
triton_per_fused_add_mean_mul_sub_xlogy_0 = async_compile.triton('triton_per_fused_add_mean_mul_sub_xlogy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_sub_xlogy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
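    # The next eight lines implement xlogy(t) = t * log(t) with the
    # conventions 0 * log(0) = 0 and NaN inputs propagated, i.e. the
    # target-entropy term of KLDivLoss.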
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float("nan")
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tmp0 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = libdevice.isnan(tmp9).to(tl.int1)
tmp16 = tmp9 == tmp2
tmp17 = tl_math.log(tmp9)
tmp18 = tmp9 * tmp17
tmp19 = tl.where(tmp16, tmp2, tmp18)
tmp20 = tl.where(tmp15, tmp7, tmp19)
tmp21 = tmp9 * tmp0
tmp22 = tmp20 - tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = 256.0
tmp27 = tmp14 / tmp26
tmp28 = tmp25 / tmp26
tmp29 = tmp27 + tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [kl_div, kl_div_1, add], Original ATen: [aten.xlogy, aten.mul, aten.sub, aten.mean, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_sub_xlogy_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class JS_Divergence(nn.Module):
def __init__(self):
super().__init__()
self.engine = nn.KLDivLoss()
def forward(self, x, y):
return self.engine(x, y) + self.engine(y, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
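    # Both reductions follow KLDivLoss's pointwise form
    # target * log(target) - target * input, summed and divided by the
    # element count (mean reduction); their sum equals
    # engine(x, y) + engine(y, x) from the module below.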
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float('nan')
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tmp0 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = libdevice.isnan(tmp9).to(tl.int1)
tmp16 = tmp9 == tmp2
tmp17 = tl_math.log(tmp9)
tmp18 = tmp9 * tmp17
tmp19 = tl.where(tmp16, tmp2, tmp18)
tmp20 = tl.where(tmp15, tmp7, tmp19)
tmp21 = tmp9 * tmp0
tmp22 = tmp20 - tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = 256.0
tmp27 = tmp14 / tmp26
tmp28 = tmp25 / tmp26
tmp29 = tmp27 + tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_sub_xlogy_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class JS_DivergenceNew(nn.Module):
def __init__(self):
super().__init__()
self.engine = nn.KLDivLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| loveorchids/omni_torch | JS_Divergence | false | 7126 | [
"Apache-2.0"
] | 1 | 9bd654387619c0cbc6aee9e91482ecc9200138ef | https://github.com/loveorchids/omni_torch/tree/9bd654387619c0cbc6aee9e91482ecc9200138ef | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.engine = nn.KLDivLoss()
def forward(self, x, y):
return self.engine(x, y) + self.engine(y, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
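# Editorial sketch (illustrative names, not from the original repo):
# nn.KLDivLoss expects log-probabilities as its first argument, so a
# probability-space symmetric KL would be written as below; the module above
# instead feeds raw tensors straight through, which is exactly what the
# fused kernel computes.
def _demo_symmetric_kl():
    p = torch.softmax(torch.randn(8, 4), dim=-1)
    q = torch.softmax(torch.randn(8, 4), dim=-1)
    kl = nn.KLDivLoss(reduction='batchmean')
    return kl(p.log(), q) + kl(q.log(), p)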
|
mlp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cv/ccvagufpv2m2vfo3w35yjibzs4ygygyoilwi2jrpbsvlftqb6nzr.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (2048, 4), (4, 1))
assert_size_stride(primals_3, (2048, ), (1, ))
assert_size_stride(primals_4, (2048, 2048), (2048, 1))
assert_size_stride(primals_5, (2048, ), (1, ))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 8192, grid=grid(8192), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (buf4, primals_1, buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2048, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class mlp(nn.Module):
def __init__(self, seq_len):
super(mlp, self).__init__()
self.lin1 = nn.Linear(seq_len, 2048)
self.lin2 = nn.Linear(2048, 2048)
self.lin3 = nn.Linear(2048, seq_len)
self.relu = nn.ReLU()
def forward(self, input_):
input_ = input_.reshape(input_.size(0), -1)
out = self.lin1(input_)
out = self.lin2(self.relu(out))
out = self.lin3(self.relu(out))
return out.view(input_.size(0), -1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'seq_len': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
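    # Fused bias-add + ReLU applied in place to the GEMM output; x0 indexes
    # the 2048-wide bias vector, broadcast across every row of the batch.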
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (2048, 4), (4, 1))
assert_size_stride(primals_3, (2048,), (1,))
assert_size_stride(primals_4, (2048, 2048), (2048, 1))
assert_size_stride(primals_5, (2048,), (1,))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 2048
), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(8192)](buf1, primals_3, 8192, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2048, 2048),
(1, 2048), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(8192)](buf3, primals_5, 8192, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
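        # addmm folds the final bias into the GEMM:
        # buf4 = primals_7 + buf3 @ primals_6.T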
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class mlpNew(nn.Module):
def __init__(self, seq_len):
super(mlpNew, self).__init__()
self.lin1 = nn.Linear(seq_len, 2048)
self.lin2 = nn.Linear(2048, 2048)
self.lin3 = nn.Linear(2048, seq_len)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_2 = self.lin1.weight
primals_3 = self.lin1.bias
primals_4 = self.lin2.weight
primals_5 = self.lin2.bias
primals_6 = self.lin3.weight
primals_7 = self.lin3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| liuziyang1106/TSAN-brain-age-estimation | mlp | false | 7127 | [
"MIT"
] | 1 | 374b481291edb9516ee9871a53f7acb6a2eeaebc | https://github.com/liuziyang1106/TSAN-brain-age-estimation/tree/374b481291edb9516ee9871a53f7acb6a2eeaebc | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, seq_len):
super().__init__()
self.lin1 = nn.Linear(seq_len, 2048)
self.lin2 = nn.Linear(2048, 2048)
self.lin3 = nn.Linear(2048, seq_len)
self.relu = nn.ReLU()
def forward(self, input_):
input_ = input_.reshape(input_.size(0), -1)
out = self.lin1(input_)
out = self.lin2(self.relu(out))
out = self.lin3(self.relu(out))
return out.view(input_.size(0), -1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
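# Editorial sketch (`_demo_mlp` is an illustrative name): the module
# flattens everything after the batch dimension, so seq_len must equal the
# flattened feature count of the input.
def _demo_mlp():
    m = Model(seq_len=4)
    y = m(torch.rand(4, 4))
    assert y.shape == (4, 4)
    return y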
|
Swish | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xe/cxejxkqtxrljth4tyzjublg5tugf3him5iha62nf2gwemawndksr.py
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp2 * tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
self.beta = nn.Parameter(torch.ones(1))
def forward(self, x):
return x * F.sigmoid(self.beta * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
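    # beta is a single learnable scalar: it is loaded once and broadcast so
    # the whole tile computes swish(x) = x * sigmoid(beta * x) elementwise.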
tmp3 = tmp2 * tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class SwishNew(nn.Module):
def __init__(self):
super(SwishNew, self).__init__()
self.beta = nn.Parameter(torch.ones(1))
def forward(self, input_0):
primals_1 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| lorylei/DARTS-et | Swish | false | 7128 | [
"Apache-2.0"
] | 1 | f22cfd53c14afd6ba602b8ecfbff9cdf77fc2ff8 | https://github.com/lorylei/DARTS-et/tree/f22cfd53c14afd6ba602b8ecfbff9cdf77fc2ff8 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
class Model(nn.Module):
def __init__(self):
super().__init__()
self.beta = nn.Parameter(torch.ones(1))
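    # Editorial note: F.sigmoid is deprecated in favor of torch.sigmoid;
    # with beta at its initial value of 1 this activation equals F.silu.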
def forward(self, x):
return x * F.sigmoid(self.beta * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
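# Editorial sketch (`_demo_swish` is an illustrative name): with beta at its
# initial value of 1, Swish reduces to the built-in SiLU activation
# x * sigmoid(x).
def _demo_swish():
    m = Model()
    x = torch.randn(16)
    assert torch.allclose(m(x), F.silu(x), atol=1e-6)
    return m(x)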
|
SpatialAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/uc/cucdaa5tqnxykdmw5yqh7ir5ac35phopjcobljrg4rrtlnfjtuwd.py
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# result => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %mean], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = 4.0
tmp25 = tmp23 / tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp13, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# output => convolution
# output_1 => sigmoid
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
self.conv = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=
kernel_size // 2)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
max_result, _ = torch.max(x, dim=1, keepdim=True)
avg_result = torch.mean(x, dim=1, keepdim=True)
result = torch.cat([max_result, avg_result], 1)
output = self.conv(result)
output = self.sigmoid(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
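    # x1 selects the output channel: channel 0 takes the channelwise max of
    # the input, channel 1 the channelwise mean, written straight into the
    # concatenated (N, 2, H, W) buffer without materializing either reduction.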
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = 4.0
tmp25 = tmp23 / tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp13, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
self.conv = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=
kernel_size // 2)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lulor/project_vg | SpatialAttention | false | 7129 | [
"MIT"
] | 1 | 27b0c3b3038c5a666dde516a0a265ae8ddf2059f | https://github.com/lulor/project_vg/tree/27b0c3b3038c5a666dde516a0a265ae8ddf2059f | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
self.conv = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=
kernel_size // 2)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
max_result, _ = torch.max(x, dim=1, keepdim=True)
avg_result = torch.mean(x, dim=1, keepdim=True)
result = torch.cat([max_result, avg_result], 1)
output = self.conv(result)
output = self.sigmoid(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
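# Editorial sketch (`_demo_spatial_attention` is an illustrative name): the
# module emits a one-channel gate in (0, 1) that is typically multiplied
# back onto the input feature map.
def _demo_spatial_attention():
    att = Model(kernel_size=7)
    x = torch.rand(2, 32, 8, 8)
    gate = att(x)
    return x * gate  # gate (2, 1, 8, 8) broadcasts over the channel dim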
|
Predict_Network1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x6/cx6p337hpowsfltqoa32j7xn2h7wrjyej5znz6vmu2kg5gtkm6rr.py
# Topologically Sorted Source Nodes: [mean, std, sub, add, output, output_1, h], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# h => relu
# mean => mean
# output => div
# output_1 => add_1
# std => sqrt, var
# sub => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_1, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_1, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_div_mean_relu_std_sub_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_div_mean_relu_std_sub_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_relu_std_sub_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_relu_std_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
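    # Manual layer norm over rows of length 4: tmp9 is the row mean, the
    # squared deviations are summed and divided by 3.0 (N - 1, matching
    # torch.std's unbiased default), and (x - mean) / (std + 1e-06) + bias
    # then feeds the ReLU below.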
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-06
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
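    # tmp33 marks where the ReLU clamped to zero; the mask is saved for the
    # threshold_backward step of autograd.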
tl.store(out_ptr0 + (x2), tmp31, xmask)
tl.store(out_ptr1 + (x2), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
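# Kernel 1 is the simpler fusion: an in-place bias add plus ReLU on the
# second linear's output, again emitting the boolean <= 0 mask. Roughly
# (sketch, tensor names ours):
#   h = torch.relu(h + bias)   # written back through in_out_ptr0
#   mask = h <= 0.0            # written to out_ptr0 for threshold_backward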
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mean, std, sub, add, output, output_1, h], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_relu_std_sub_threshold_backward_0.run(buf0, primals_4, buf1, buf6, 256, grid=grid(256), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_6, buf5, 256, grid=grid(256), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_7, buf5, primals_5, buf6, )
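# Note on the return value: the first tensor is the network output reshaped
# back to (4, 4, 4, 4); the rest (flattened input, intermediate activations,
# weights, and the two ReLU masks buf5/buf6) are saved so the autograd
# backward pass can run without recomputing the forward kernels.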
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class LayerNorm(nn.Module):
"""
Simple 1D LayerNorm.
"""
def __init__(self, features, center=True, scale=False, eps=1e-06):
super().__init__()
self.center = center
self.scale = scale
self.eps = eps
if self.scale:
self.scale_param = nn.Parameter(torch.ones(features))
else:
self.scale_param = None
if self.center:
self.center_param = nn.Parameter(torch.zeros(features))
else:
self.center_param = None
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
output = (x - mean) / (std + self.eps)
if self.scale:
output = output * self.scale_param
if self.center:
output = output + self.center_param
return output
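# Example (sketch): with the default center=True, scale=False, the module
# only re-centers after normalizing over the last dimension, e.g.
#   ln = LayerNorm(4)
#   y = ln(torch.randn(2, 3, 4))  # per-row mean ~0 while center_param is 0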
class Predict_Network1(nn.Module):
def __init__(self, num_inputs, hidden_dim, num_outputs, layer_norm=True,
lr=0.001):
super(Predict_Network1, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.last_fc = nn.Linear(hidden_dim, num_outputs)
self.layer_norm = layer_norm
if layer_norm:
self.ln1 = LayerNorm(hidden_dim)
self.apply(weights_init_)
self.lr = lr
self.optimizer = optim.Adam(self.parameters(), lr=self.lr)
def forward(self, input):
if self.layer_norm:
h = F.relu(self.ln1(self.linear1(input)))
else:
h = F.relu(self.linear1(input))
h = F.relu(self.linear2(h))
x = self.last_fc(h)
return x
def get_log_pi(self, own_variable, other_variable):
predict_variable = self.forward(own_variable)
log_prob = -1 * F.mse_loss(predict_variable, other_variable,
reduction='none')
log_prob = torch.sum(log_prob, -1, keepdim=True)
return log_prob
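# Interpretation (sketch): summing the negated per-element squared errors
# yields an unnormalized Gaussian log-likelihood of other_variable under
# the network's prediction (unit variance, constant terms dropped).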
def update(self, own_variable, other_variable, mask):
predict_variable = self.forward(own_variable)
loss = F.mse_loss(predict_variable, other_variable, reduction='none')
loss = loss.sum(dim=-1, keepdim=True)
loss = (loss * mask).sum() / mask.sum()
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.parameters(), 1.0)
self.optimizer.step()
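# Typical call (sketch): mask zeroes out invalid entries so the loss is a
# mean over valid samples only, e.g.
#   net = Predict_Network1(4, 4, 4)
#   net.update(torch.rand(8, 4), torch.rand(8, 4), torch.ones(8, 1))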
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'hidden_dim': 4, 'num_outputs': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_relu_std_sub_threshold_backward_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-06
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr0 + x2, tmp31, xmask)
tl.store(out_ptr1 + x2, tmp33, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_div_mean_relu_std_sub_threshold_backward_0[grid
(256)](buf0, primals_4, buf1, buf6, 256, XBLOCK=128, num_warps=
4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_6, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), primals_7, buf5, primals_5, buf6
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class LayerNorm(nn.Module):
"""
Simple 1D LayerNorm.
"""
def __init__(self, features, center=True, scale=False, eps=1e-06):
super().__init__()
self.center = center
self.scale = scale
self.eps = eps
if self.scale:
self.scale_param = nn.Parameter(torch.ones(features))
else:
self.scale_param = None
if self.center:
self.center_param = nn.Parameter(torch.zeros(features))
else:
self.center_param = None
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
output = (x - mean) / (std + self.eps)
if self.scale:
output = output * self.scale_param
if self.center:
output = output + self.center_param
return output
class Predict_Network1New(nn.Module):
def __init__(self, num_inputs, hidden_dim, num_outputs, layer_norm=True,
lr=0.001):
super(Predict_Network1New, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.last_fc = nn.Linear(hidden_dim, num_outputs)
self.layer_norm = layer_norm
if layer_norm:
self.ln1 = LayerNorm(hidden_dim)
self.apply(weights_init_)
self.lr = lr
self.optimizer = optim.Adam(self.parameters(), lr=self.lr)
def get_log_pi(self, own_variable, other_variable):
predict_variable = self.forward(own_variable)
log_prob = -1 * F.mse_loss(predict_variable, other_variable,
reduction='none')
log_prob = torch.sum(log_prob, -1, keepdim=True)
return log_prob
def update(self, own_variable, other_variable, mask):
predict_variable = self.forward(own_variable)
loss = F.mse_loss(predict_variable, other_variable, reduction='none')
loss = loss.sum(dim=-1, keepdim=True)
loss = (loss * mask).sum() / mask.sum()
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.parameters(), 1.0)
self.optimizer.step()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_5 = self.linear2.weight
# primals indices follow the graph's first-use order: ln1.center_param is
# primals_4 (consumed by kernel 0), linear2.bias is primals_6 (kernel 1),
# and last_fc.bias is primals_8 (the final addmm bias).
primals_4 = self.ln1.center_param
primals_6 = self.linear2.bias
primals_7 = self.last_fc.weight
primals_8 = self.last_fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| ltzheng/CDS | Predict_Network1 | false | 7,130 | ["Apache-2.0"] | 1 | 397282147498647a9f26577adfa451e8478de76d | https://github.com/ltzheng/CDS/tree/397282147498647a9f26577adfa451e8478de76d | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class LayerNorm(nn.Module):
"""
Simple 1D LayerNorm.
"""
def __init__(self, features, center=True, scale=False, eps=1e-06):
super().__init__()
self.center = center
self.scale = scale
self.eps = eps
if self.scale:
self.scale_param = nn.Parameter(torch.ones(features))
else:
self.scale_param = None
if self.center:
self.center_param = nn.Parameter(torch.zeros(features))
else:
self.center_param = None
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
output = (x - mean) / (std + self.eps)
if self.scale:
output = output * self.scale_param
if self.center:
output = output + self.center_param
return output
class Model(nn.Module):
def __init__(self, num_inputs, hidden_dim, num_outputs, layer_norm=True,
lr=0.001):
super().__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.last_fc = nn.Linear(hidden_dim, num_outputs)
self.layer_norm = layer_norm
if layer_norm:
self.ln1 = LayerNorm(hidden_dim)
self.apply(weights_init_)
self.lr = lr
self.optimizer = optim.Adam(self.parameters(), lr=self.lr)
def forward(self, input):
if self.layer_norm:
h = F.relu(self.ln1(self.linear1(input)))
else:
h = F.relu(self.linear1(input))
h = F.relu(self.linear2(h))
x = self.last_fc(h)
return x
def get_log_pi(self, own_variable, other_variable):
predict_variable = self.forward(own_variable)
log_prob = -1 * F.mse_loss(predict_variable, other_variable,
reduction='none')
log_prob = torch.sum(log_prob, -1, keepdim=True)
return log_prob
def update(self, own_variable, other_variable, mask):
predict_variable = self.forward(own_variable)
loss = F.mse_loss(predict_variable, other_variable, reduction='none')
loss = loss.sum(dim=-1, keepdim=True)
loss = (loss * mask).sum() / mask.sum()
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.parameters(), 1.0)
self.optimizer.step()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hi/chiloeirghxzww3sbcen7lmt2hh5lbwe52viinhofr2qqdj4azld.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
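# The kernel above fuses the fc1 bias add with LeakyReLU(0.01) and also
# stores the sign mask used by the backward pass. A minimal eager sketch
# (helper name ours, not part of the generated module):
def _bias_leaky_relu_reference(x, bias, negative_slope=0.01):
    y = x + bias
    gt = y > 0  # analogue of out_ptr0 (buf1)
    return gt, torch.where(gt, y, negative_slope * y)  # out_ptr1 (buf2)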
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 128, grid=grid(128), stream=stream0)
del buf0
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 2), (2, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.fc1 = nn.Linear(in_features, in_features // 2)
self.fc2 = nn.Linear(in_features // 2, out_features)
self.dropout = nn.Dropout(0.2)
def forward(self, input):
input = self.dropout(input)
x = F.leaky_relu(self.fc1(input))
x = self.fc2(x)
return x
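# Note (sketch): the compiled graph below contains no dropout ops, which is
# consistent with tracing under eval(); in that mode this forward reduces
# to fc2(leaky_relu(fc1(input))).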
def __repr__(self):
return '{} ({} -> {})'.format(self.__class__.__name__, self.
in_features, self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(128)](buf0, primals_3, buf1,
buf2, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 2), (2, 1), 0), primals_4
class MLPNew(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.fc1 = nn.Linear(in_features, in_features // 2)
self.fc2 = nn.Linear(in_features // 2, out_features)
self.dropout = nn.Dropout(0.2)
def __repr__(self):
return '{} ({} -> {})'.format(self.__class__.__name__, self.
in_features, self.out_features)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| luogan1234/movie-dialog-project | MLP | false | 7,131 | ["MIT"] | 1 | 17ac4a10c069c6b4c41bb675b98a35b2182cf504 | https://github.com/luogan1234/movie-dialog-project/tree/17ac4a10c069c6b4c41bb675b98a35b2182cf504 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.fc1 = nn.Linear(in_features, in_features // 2)
self.fc2 = nn.Linear(in_features // 2, out_features)
self.dropout = nn.Dropout(0.2)
def forward(self, input):
input = self.dropout(input)
x = F.leaky_relu(self.fc1(input))
x = self.fc2(x)
return x
def __repr__(self):
return '{} ({} -> {})'.format(self.__class__.__name__, self.
in_features, self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MLPLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/z5/cz5bgdo2gmhnnmtf6w7lrjkvliacxo7nomq7mbmjquxqyxqgt5bj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf3, 128, grid=grid(128), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPLayer(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.in_dim = in_dim
mid_dim = in_dim // 2
self.out_dim = out_dim
self.fc1 = nn.Linear(in_dim, mid_dim)
self.fc2 = nn.Linear(mid_dim, out_dim)
self.dropout = nn.Dropout(0.2)
def forward(self, input):
input = self.dropout(input)
x = F.relu(self.fc1(input))
x = self.dropout(x)
x = self.fc2(x)
return x
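# As with MLP above, neither dropout call appears in the compiled graph,
# matching eval()-mode tracing; only fc1 -> ReLU -> fc2 remains.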
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1,
primals_3, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4, buf3
class MLPLayerNew(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.in_dim = in_dim
mid_dim = in_dim // 2
self.out_dim = out_dim
self.fc1 = nn.Linear(in_dim, mid_dim)
self.fc2 = nn.Linear(mid_dim, out_dim)
self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| luogan1234/prerequisite-prediction-co-training | MLPLayer | false | 7,132 | ["MIT"] | 1 | 28e3f241ada5afe75a73525375087be230735c2a | https://github.com/luogan1234/prerequisite-prediction-co-training/tree/28e3f241ada5afe75a73525375087be230735c2a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.in_dim = in_dim
mid_dim = in_dim // 2
self.out_dim = out_dim
self.fc1 = nn.Linear(in_dim, mid_dim)
self.fc2 = nn.Linear(mid_dim, out_dim)
self.dropout = nn.Dropout(0.2)
def forward(self, input):
input = self.dropout(input)
x = F.relu(self.fc1(input))
x = self.dropout(x)
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|