Dataset columns (type and length / value range):

| column | type | length / value range |
|---|---|---|
| entry_point | string | 1–65 chars |
| original_triton_code | string | 4.5k–619k chars |
| python_code | string | 208–60.9k chars |
| triton_code | string | 1.15k–275k chars |
| repo_name | string | 7–115 chars |
| module_name | string | 1–65 chars |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | list | 1–6 entries |
| stars | int64 | 0–19.8k |
| sha | string | 40 chars |
| repo_link | string | 72–180 chars |
entry_point: ATLoss

original_triton_code:
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ft/cftz5ja4wgrkcx5pv7d7j6lunvnnmz6djadit33a2463r447sayy.py
# Topologically Sorted Source Nodes: [setitem_1], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_1 => copy_1, full_default_2
# Graph fragment:
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_2, %full_default_2), kwargs = {})
# %copy__default : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%select_int, %copy_1), kwargs = {})
triton_poi_fused_fill_lift_fresh_0 = async_compile.triton('triton_poi_fused_fill_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_0', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = 0.0
tl.store(out_ptr0 + (x0 + (64*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/cn/ccnjm2dqpzcrnbuww2omqd3latcfln56427nk6wxia5xlnc6b5c5.py
# Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, n_mask, sub_3, mul_2, logit2, log_softmax_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, sub_3, sum_1
# log_softmax_1 => amax_1, exp_1, sub_7, sum_3
# logit1 => sub_2
# logit2 => sub_6
# mul => mul
# mul_2 => mul_2
# n_mask => sub
# p_mask => add
# setitem => copy, full_default_1
# sub_1 => sub_1
# sub_3 => sub_5
# th_label => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select, %full_default_1), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %copy, 1, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %select_scatter_default), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 1e+30), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub_2, [-1], True), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, 1e+30), kwargs = {})
# %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul_2), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub_6, [-1], True), kwargs = {})
# %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_6, %amax_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_7,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1 = async_compile.triton('triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp2 = x1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp2 == tmp3
tmp5 = 1.0
tmp6 = 0.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tmp1 + tmp7
tmp9 = tmp5 - tmp8
tmp10 = 1e+30
tmp11 = tmp9 * tmp10
tmp12 = tmp0 - tmp11
tmp15 = tmp14 + tmp7
tmp16 = tmp5 - tmp15
tmp17 = tmp16 * tmp10
tmp18 = tmp13 - tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp22 = tmp21 + tmp7
tmp23 = tmp5 - tmp22
tmp24 = tmp23 * tmp10
tmp25 = tmp20 - tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp29 = tmp28 + tmp7
tmp30 = tmp5 - tmp29
tmp31 = tmp30 * tmp10
tmp32 = tmp27 - tmp31
tmp33 = triton_helpers.maximum(tmp26, tmp32)
tmp34 = tmp12 - tmp33
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp18 - tmp33
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp35 + tmp37
tmp39 = tmp25 - tmp33
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tmp42 = tmp32 - tmp33
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tmp45 = tmp5 - tmp1
tmp46 = tmp5 - tmp45
tmp47 = tmp46 * tmp10
tmp48 = tmp0 - tmp47
tmp49 = tmp5 - tmp14
tmp50 = tmp5 - tmp49
tmp51 = tmp50 * tmp10
tmp52 = tmp13 - tmp51
tmp53 = triton_helpers.maximum(tmp48, tmp52)
tmp54 = tmp5 - tmp21
tmp55 = tmp5 - tmp54
tmp56 = tmp55 * tmp10
tmp57 = tmp20 - tmp56
tmp58 = triton_helpers.maximum(tmp53, tmp57)
tmp59 = tmp5 - tmp28
tmp60 = tmp5 - tmp59
tmp61 = tmp60 * tmp10
tmp62 = tmp27 - tmp61
tmp63 = triton_helpers.maximum(tmp58, tmp62)
tmp64 = tmp48 - tmp63
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp52 - tmp63
tmp67 = tl_math.exp(tmp66)
tmp68 = tmp65 + tmp67
tmp69 = tmp57 - tmp63
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp68 + tmp70
tmp72 = tmp62 - tmp63
tmp73 = tl_math.exp(tmp72)
tmp74 = tmp71 + tmp73
tl.store(out_ptr0 + (x3), tmp33, xmask)
tl.store(out_ptr1 + (x3), tmp44, xmask)
tl.store(out_ptr2 + (x3), tmp63, xmask)
tl.store(out_ptr3 + (x3), tmp74, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uj/cuj4drsaoeeafppqq56jc2dqbimn76sbovf5a42dyfiev4rt56n7.py
# Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, mul_1, sum_1, loss1, n_mask, sub_3, mul_2, logit2, log_softmax_1, mul_3, sum_2, loss2, loss, loss_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax, aten.sum, aten.neg, aten.mean]
# Source node to ATen node mapping:
# log_softmax => log, sub_3, sub_4
# log_softmax_1 => amax_1, log_1, sub_7, sub_8
# logit1 => sub_2
# logit2 => sub_6
# loss => add_1
# loss1 => neg
# loss2 => neg_1
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# n_mask => sub
# p_mask => add
# setitem => copy, full_default_1
# sub_1 => sub_1
# sub_3 => sub_5
# sum_1 => sum_2
# sum_2 => sum_4
# th_label => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select, %full_default_1), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %copy, 1, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %select_scatter_default), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 1e+30), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %amax), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %log), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, 1e+30), kwargs = {})
# %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul_2), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub_6, [-1], True), kwargs = {})
# %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_6, %amax_1), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_7, %log_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %select_scatter_default), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_4,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %neg_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {})
triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2 = async_compile.triton('triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = (rindex // 16)
r4 = rindex % 16
r1 = (rindex // 4) % 4
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r4 + (64*r2)), None)
tmp1 = tl.load(in_ptr1 + (r4 + (64*r2)), None)
tmp12 = tl.load(in_ptr2 + (r1 + (16*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + (r1 + (16*r2)), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (16 + r4 + (64*r2)), None)
tmp19 = tl.load(in_ptr1 + (16 + r4 + (64*r2)), None)
tmp27 = tl.load(in_ptr2 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr3 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (32 + r4 + (64*r2)), None)
tmp35 = tl.load(in_ptr1 + (32 + r4 + (64*r2)), None)
tmp43 = tl.load(in_ptr2 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr0 + (48 + r4 + (64*r2)), None)
tmp51 = tl.load(in_ptr1 + (48 + r4 + (64*r2)), None)
tmp59 = tl.load(in_ptr2 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr3 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr4 + (r1 + (16*r2)), None, eviction_policy='evict_last')
tmp72 = tl.load(in_ptr5 + (r1 + (16*r2)), None, eviction_policy='evict_last')
tmp80 = tl.load(in_ptr4 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr5 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr4 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp93 = tl.load(in_ptr5 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp102 = tl.load(in_ptr4 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp104 = tl.load(in_ptr5 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last')
tmp2 = tl.full([1, 1], 0, tl.int32)
tmp3 = tmp2 == tmp2
tmp4 = 1.0
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp1 + tmp6
tmp8 = tmp4 - tmp7
tmp9 = 1e+30
tmp10 = tmp8 * tmp9
tmp11 = tmp0 - tmp10
tmp13 = tmp11 - tmp12
tmp15 = tl_math.log(tmp14)
tmp16 = tmp13 - tmp15
tmp17 = tmp16 * tmp1
tmp20 = tl.full([1, 1], 1, tl.int32)
tmp21 = tmp20 == tmp2
tmp22 = tl.where(tmp21, tmp4, tmp5)
tmp23 = tmp19 + tmp22
tmp24 = tmp4 - tmp23
tmp25 = tmp24 * tmp9
tmp26 = tmp18 - tmp25
tmp28 = tmp26 - tmp27
tmp30 = tl_math.log(tmp29)
tmp31 = tmp28 - tmp30
tmp32 = tmp31 * tmp19
tmp33 = tmp17 + tmp32
tmp36 = tl.full([1, 1], 2, tl.int32)
tmp37 = tmp36 == tmp2
tmp38 = tl.where(tmp37, tmp4, tmp5)
tmp39 = tmp35 + tmp38
tmp40 = tmp4 - tmp39
tmp41 = tmp40 * tmp9
tmp42 = tmp34 - tmp41
tmp44 = tmp42 - tmp43
tmp46 = tl_math.log(tmp45)
tmp47 = tmp44 - tmp46
tmp48 = tmp47 * tmp35
tmp49 = tmp33 + tmp48
tmp52 = tl.full([1, 1], 3, tl.int32)
tmp53 = tmp52 == tmp2
tmp54 = tl.where(tmp53, tmp4, tmp5)
tmp55 = tmp51 + tmp54
tmp56 = tmp4 - tmp55
tmp57 = tmp56 * tmp9
tmp58 = tmp50 - tmp57
tmp60 = tmp58 - tmp59
tmp62 = tl_math.log(tmp61)
tmp63 = tmp60 - tmp62
tmp64 = tmp63 * tmp51
tmp65 = tmp49 + tmp64
tmp66 = tmp4 - tmp1
tmp67 = tmp4 - tmp66
tmp68 = tmp67 * tmp9
tmp69 = tmp0 - tmp68
tmp71 = tmp69 - tmp70
tmp73 = tl_math.log(tmp72)
tmp74 = tmp71 - tmp73
tmp75 = tmp74 * tmp6
tmp76 = tmp4 - tmp19
tmp77 = tmp4 - tmp76
tmp78 = tmp77 * tmp9
tmp79 = tmp18 - tmp78
tmp81 = tmp79 - tmp80
tmp83 = tl_math.log(tmp82)
tmp84 = tmp81 - tmp83
tmp85 = tmp84 * tmp22
tmp86 = tmp75 + tmp85
tmp87 = tmp4 - tmp35
tmp88 = tmp4 - tmp87
tmp89 = tmp88 * tmp9
tmp90 = tmp34 - tmp89
tmp92 = tmp90 - tmp91
tmp94 = tl_math.log(tmp93)
tmp95 = tmp92 - tmp94
tmp96 = tmp95 * tmp38
tmp97 = tmp86 + tmp96
tmp98 = tmp4 - tmp51
tmp99 = tmp4 - tmp98
tmp100 = tmp99 * tmp9
tmp101 = tmp50 - tmp100
tmp103 = tmp101 - tmp102
tmp105 = tl_math.log(tmp104)
tmp106 = tmp103 - tmp105
tmp107 = tmp106 * tmp54
tmp108 = tmp97 + tmp107
tmp109 = -tmp65
tmp110 = -tmp108
tmp111 = tmp109 + tmp110
tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK])
tmp114 = tl.sum(tmp112, 1)[:, None]
tmp115 = 64.0
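    # 64.0 = 4 * 4 * 4, the element count of the per-position loss tensor
    # that the final loss.mean() averages over.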
tmp116 = tmp114 / tmp115
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp116, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [setitem_1], Original ATen: [aten.lift_fresh, aten.fill]
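        # This kernel zeroes the index-0 slice of dim 1 of arg0_1 in place,
        # implementing the in-place `labels[:, 0] = 0.0` from ATLoss.forward.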
stream0 = get_raw_stream(0)
triton_poi_fused_fill_lift_fresh_0.run(arg0_1, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, n_mask, sub_3, mul_2, logit2, log_softmax_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax]
triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1.run(arg1_1, arg0_1, buf1, buf2, buf4, buf5, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, mul_1, sum_1, loss1, n_mask, sub_3, mul_2, logit2, log_softmax_1, mul_3, sum_2, loss2, loss, loss_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax, aten.sum, aten.neg, aten.mean]
triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2.run(buf8, arg1_1, arg0_1, buf1, buf2, buf4, buf5, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del buf1
del buf2
del buf4
del buf5
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
class ATLoss(nn.Module):
def __init__(self):
super().__init__()
    def forward(self, logits: 'Tensor', labels: 'Tensor') -> Tensor:
"""
Args:
            logits: predicted unnormalized class scores (shape: batch size x num classes)
labels: one-hot encoded true labels (shape: batch size x num classes)
"""
th_label = torch.zeros_like(labels, dtype=torch.float)
th_label[:, 0] = 1.0
labels[:, 0] = 0.0
p_mask = labels + th_label
n_mask = 1 - labels
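        # Subtracting (1 - mask) * 1e30 below drives masked-out logits toward
        # -inf, so log_softmax assigns them negligible probability mass.
        # Note that labels[:, 0] is zeroed in place above, mutating the
        # caller's tensor.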
logit1 = logits - (1 - p_mask) * 1e+30
loss1 = -(F.log_softmax(logit1, dim=-1) * labels).sum(1)
logit2 = logits - (1 - n_mask) * 1e+30
loss2 = -(F.log_softmax(logit2, dim=-1) * th_label).sum(1)
loss = loss1 + loss2
loss = loss.mean()
return loss
    def get_label(self, logits: 'Tensor', num_labels: 'int'=-1,
                  threshold: 'float'=None) -> Tensor:
        """Calculates the labels from the logits."""
        if threshold is not None:
th_logit = torch.full((len(logits), 1), threshold)
else:
th_logit = logits[:, 0].unsqueeze(1)
output = torch.zeros_like(logits)
mask = logits > th_logit
if num_labels > 0:
top_v, _ = torch.topk(logits, num_labels, dim=1)
top_v = top_v[:, -1]
mask = (logits >= top_v.unsqueeze(1)) & mask
output[mask] = 1.0
output[:, 0] = output.sum(1) == 0.0
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
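# A minimal usage sketch (illustrative shapes and values, not part of the
# original module). Class 0 serves as the adaptive-threshold class, so the
# one-hot labels should reserve column 0 for it:
if __name__ == "__main__":
    loss_fn = ATLoss()
    example_logits = torch.randn(8, 5)  # batch of 8, 5 classes incl. threshold
    example_labels = torch.zeros(8, 5)
    example_labels[:, 2] = 1.0  # mark class 2 as positive for every sample
    print(loss_fn(example_logits, example_labels))  # 0-dim loss tensor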
triton_code:
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import Tensor
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
tmp0 = 0.0
tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1(
in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = x1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp2 == tmp3
tmp5 = 1.0
tmp6 = 0.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tmp1 + tmp7
tmp9 = tmp5 - tmp8
tmp10 = 1e+30
tmp11 = tmp9 * tmp10
tmp12 = tmp0 - tmp11
tmp15 = tmp14 + tmp7
tmp16 = tmp5 - tmp15
tmp17 = tmp16 * tmp10
tmp18 = tmp13 - tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp22 = tmp21 + tmp7
tmp23 = tmp5 - tmp22
tmp24 = tmp23 * tmp10
tmp25 = tmp20 - tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp29 = tmp28 + tmp7
tmp30 = tmp5 - tmp29
tmp31 = tmp30 * tmp10
tmp32 = tmp27 - tmp31
tmp33 = triton_helpers.maximum(tmp26, tmp32)
tmp34 = tmp12 - tmp33
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp18 - tmp33
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp35 + tmp37
tmp39 = tmp25 - tmp33
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tmp42 = tmp32 - tmp33
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tmp45 = tmp5 - tmp1
tmp46 = tmp5 - tmp45
tmp47 = tmp46 * tmp10
tmp48 = tmp0 - tmp47
tmp49 = tmp5 - tmp14
tmp50 = tmp5 - tmp49
tmp51 = tmp50 * tmp10
tmp52 = tmp13 - tmp51
tmp53 = triton_helpers.maximum(tmp48, tmp52)
tmp54 = tmp5 - tmp21
tmp55 = tmp5 - tmp54
tmp56 = tmp55 * tmp10
tmp57 = tmp20 - tmp56
tmp58 = triton_helpers.maximum(tmp53, tmp57)
tmp59 = tmp5 - tmp28
tmp60 = tmp5 - tmp59
tmp61 = tmp60 * tmp10
tmp62 = tmp27 - tmp61
tmp63 = triton_helpers.maximum(tmp58, tmp62)
tmp64 = tmp48 - tmp63
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp52 - tmp63
tmp67 = tl_math.exp(tmp66)
tmp68 = tmp65 + tmp67
tmp69 = tmp57 - tmp63
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp68 + tmp70
tmp72 = tmp62 - tmp63
tmp73 = tl_math.exp(tmp72)
tmp74 = tmp71 + tmp73
tl.store(out_ptr0 + x3, tmp33, xmask)
tl.store(out_ptr1 + x3, tmp44, xmask)
tl.store(out_ptr2 + x3, tmp63, xmask)
tl.store(out_ptr3 + x3, tmp74, xmask)
@triton.jit
def triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 16
r4 = rindex % 16
r1 = rindex // 4 % 4
tmp0 = tl.load(in_ptr0 + (r4 + 64 * r2), None)
tmp1 = tl.load(in_ptr1 + (r4 + 64 * r2), None)
tmp12 = tl.load(in_ptr2 + (r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr3 + (r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), None)
tmp19 = tl.load(in_ptr1 + (16 + r4 + 64 * r2), None)
tmp27 = tl.load(in_ptr2 + (4 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr3 + (4 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), None)
tmp35 = tl.load(in_ptr1 + (32 + r4 + 64 * r2), None)
tmp43 = tl.load(in_ptr2 + (8 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp45 = tl.load(in_ptr3 + (8 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), None)
tmp51 = tl.load(in_ptr1 + (48 + r4 + 64 * r2), None)
tmp59 = tl.load(in_ptr2 + (12 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp61 = tl.load(in_ptr3 + (12 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp70 = tl.load(in_ptr4 + (r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp72 = tl.load(in_ptr5 + (r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp80 = tl.load(in_ptr4 + (4 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp82 = tl.load(in_ptr5 + (4 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp91 = tl.load(in_ptr4 + (8 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp93 = tl.load(in_ptr5 + (8 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp102 = tl.load(in_ptr4 + (12 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp104 = tl.load(in_ptr5 + (12 + r1 + 16 * r2), None, eviction_policy=
'evict_last')
tmp2 = tl.full([1, 1], 0, tl.int32)
tmp3 = tmp2 == tmp2
tmp4 = 1.0
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp1 + tmp6
tmp8 = tmp4 - tmp7
tmp9 = 1e+30
tmp10 = tmp8 * tmp9
tmp11 = tmp0 - tmp10
tmp13 = tmp11 - tmp12
tmp15 = tl_math.log(tmp14)
tmp16 = tmp13 - tmp15
tmp17 = tmp16 * tmp1
tmp20 = tl.full([1, 1], 1, tl.int32)
tmp21 = tmp20 == tmp2
tmp22 = tl.where(tmp21, tmp4, tmp5)
tmp23 = tmp19 + tmp22
tmp24 = tmp4 - tmp23
tmp25 = tmp24 * tmp9
tmp26 = tmp18 - tmp25
tmp28 = tmp26 - tmp27
tmp30 = tl_math.log(tmp29)
tmp31 = tmp28 - tmp30
tmp32 = tmp31 * tmp19
tmp33 = tmp17 + tmp32
tmp36 = tl.full([1, 1], 2, tl.int32)
tmp37 = tmp36 == tmp2
tmp38 = tl.where(tmp37, tmp4, tmp5)
tmp39 = tmp35 + tmp38
tmp40 = tmp4 - tmp39
tmp41 = tmp40 * tmp9
tmp42 = tmp34 - tmp41
tmp44 = tmp42 - tmp43
tmp46 = tl_math.log(tmp45)
tmp47 = tmp44 - tmp46
tmp48 = tmp47 * tmp35
tmp49 = tmp33 + tmp48
tmp52 = tl.full([1, 1], 3, tl.int32)
tmp53 = tmp52 == tmp2
tmp54 = tl.where(tmp53, tmp4, tmp5)
tmp55 = tmp51 + tmp54
tmp56 = tmp4 - tmp55
tmp57 = tmp56 * tmp9
tmp58 = tmp50 - tmp57
tmp60 = tmp58 - tmp59
tmp62 = tl_math.log(tmp61)
tmp63 = tmp60 - tmp62
tmp64 = tmp63 * tmp51
tmp65 = tmp49 + tmp64
tmp66 = tmp4 - tmp1
tmp67 = tmp4 - tmp66
tmp68 = tmp67 * tmp9
tmp69 = tmp0 - tmp68
tmp71 = tmp69 - tmp70
tmp73 = tl_math.log(tmp72)
tmp74 = tmp71 - tmp73
tmp75 = tmp74 * tmp6
tmp76 = tmp4 - tmp19
tmp77 = tmp4 - tmp76
tmp78 = tmp77 * tmp9
tmp79 = tmp18 - tmp78
tmp81 = tmp79 - tmp80
tmp83 = tl_math.log(tmp82)
tmp84 = tmp81 - tmp83
tmp85 = tmp84 * tmp22
tmp86 = tmp75 + tmp85
tmp87 = tmp4 - tmp35
tmp88 = tmp4 - tmp87
tmp89 = tmp88 * tmp9
tmp90 = tmp34 - tmp89
tmp92 = tmp90 - tmp91
tmp94 = tl_math.log(tmp93)
tmp95 = tmp92 - tmp94
tmp96 = tmp95 * tmp38
tmp97 = tmp86 + tmp96
tmp98 = tmp4 - tmp51
tmp99 = tmp4 - tmp98
tmp100 = tmp99 * tmp9
tmp101 = tmp50 - tmp100
tmp103 = tmp101 - tmp102
tmp105 = tl_math.log(tmp104)
tmp106 = tmp103 - tmp105
tmp107 = tmp106 * tmp54
tmp108 = tmp97 + tmp107
tmp109 = -tmp65
tmp110 = -tmp108
tmp111 = tmp109 + tmp110
tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK])
tmp114 = tl.sum(tmp112, 1)[:, None]
tmp115 = 64.0
tmp116 = tmp114 / tmp115
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp116, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_fill_lift_fresh_0[grid(64)](arg0_1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1[
grid(64)](arg1_1, arg0_1, buf1, buf2, buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7
del buf7
triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2[
grid(1)](buf8, arg1_1, arg0_1, buf1, buf2, buf4, buf5, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del buf1
del buf2
del buf4
del buf5
return buf8,
class ATLossNew(nn.Module):
def __init__(self):
super().__init__()
    def get_label(self, logits: 'Tensor', num_labels: 'int'=-1,
                  threshold: 'float'=None) -> Tensor:
        """Calculates the labels from the logits."""
        if threshold is not None:
th_logit = torch.full((len(logits), 1), threshold)
else:
th_logit = logits[:, 0].unsqueeze(1)
output = torch.zeros_like(logits)
mask = logits > th_logit
if num_labels > 0:
top_v, _ = torch.topk(logits, num_labels, dim=1)
top_v = top_v[:, -1]
mask = (logits >= top_v.unsqueeze(1)) & mask
output[mask] = 1.0
output[:, 0] = output.sum(1) == 0.0
return output
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
repo_name: IgnatovFedor/DeepPavlov
module_name: ATLoss
synthetic: false
uuid: 9176
licenses: ["Apache-2.0"]
stars: 0
sha: 02ba9c4b2919384c142c170c7f89c65cf05dd426
repo_link: https://github.com/IgnatovFedor/DeepPavlov/tree/02ba9c4b2919384c142c170c7f89c65cf05dd426

entry_point: BilinearRanking

original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/is/cisc2qptf5u5c6jlvkji4njhxsbq22eukpz7kuldfdoxadbbikoy.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = (xindex // 8)
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
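    # x3 indexes b1 (broadcast across the trailing dim of size 8) while
    # x0 + 8*x2 indexes b2 (broadcast across dim 2), realizing the outer
    # product b1.unsqueeze(3) * b2.unsqueeze(2).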
tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (8*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xi/cxi3ma2zsffgaa6murtux7xtgsgq2wj7vteix33b4iitg327qtvp.py
# Topologically Sorted Source Nodes: [softmax_logits, log_softmax], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax_1, exp_1, log, sub_1, sub_2, sum_2
# softmax_logits => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
triton_poi_fused__log_softmax__softmax_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tmp12 = tl_math.log(tmp10)
tmp13 = tmp4 - tmp12
tl.store(out_ptr0 + (x2), tmp11, xmask)
tl.store(out_ptr1 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 96, 8), (768, 8, 1))
assert_size_stride(primals_2, (4, 96, 8), (768, 8, 1))
assert_size_stride(primals_3, (2, 6144), (6144, 1))
assert_size_stride(primals_4, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 96, 8, 8), (6144, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 24576, grid=grid(24576), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (4, 6144), (6144, 1), 0), reinterpret_tensor(primals_3, (6144, 2), (1, 6144), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
buf3 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_logits, log_softmax], Original ATen: [aten._softmax, aten._log_softmax]
triton_poi_fused__log_softmax__softmax_1.run(buf1, buf2, buf3, 8, grid=grid(8), stream=stream0)
del buf1
return (buf2, buf3, reinterpret_tensor(buf0, (4, 6144), (6144, 1), 0), buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 96, 8), (768, 8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 96, 8), (768, 8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, 6144), (6144, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
class BilinearRanking(nn.Module):
def __init__(self, n_classes: 'int'=2, emb_size: 'int'=768, block_size:
'int'=8):
super().__init__()
self.n_classes = n_classes
self.emb_size = emb_size
self.block_size = block_size
self.bilinear = nn.Linear(self.emb_size * self.block_size, self.
n_classes)
self.softmax = nn.Softmax(dim=1)
def forward(self, text1: 'Tensor', text2: 'Tensor'):
b1 = text1.view(-1, self.emb_size // self.block_size, self.block_size)
b2 = text2.view(-1, self.emb_size // self.block_size, self.block_size)
bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size *
self.block_size)
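        # Block-wise outer product: with emb_size=768 and block_size=8, each
        # input is viewed as 96 blocks of 8 dims; per-block 8x8 outer products
        # give (batch, 96, 8, 8), flattened to (batch, 6144).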
logits = self.bilinear(bl)
softmax_logits = self.softmax(logits)
log_softmax = F.log_softmax(logits, dim=-1)
return softmax_logits, log_softmax
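# Note: for 2-D logits of shape (batch, n_classes), softmax over dim=1 and
# log_softmax over dim=-1 normalize over the same (class) dimension.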
def get_inputs():
return [torch.rand([4, 96, 8]), torch.rand([4, 96, 8])]
def get_init_inputs():
return [[], {}]
triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 8 * x2), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, None)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tmp12 = tl_math.log(tmp10)
tmp13 = tmp4 - tmp12
tl.store(out_ptr0 + x2, tmp11, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 96, 8), (768, 8, 1))
assert_size_stride(primals_2, (4, 96, 8), (768, 8, 1))
assert_size_stride(primals_3, (2, 6144), (6144, 1))
assert_size_stride(primals_4, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 96, 8, 8), (6144, 64, 8, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(24576)](primals_1, primals_2, buf0,
24576, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (4, 6144),
(6144, 1), 0), reinterpret_tensor(primals_3, (6144, 2), (1,
6144), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
buf3 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_1[grid(8)](buf1, buf2, buf3,
8, XBLOCK=8, num_warps=1, num_stages=1)
del buf1
return buf2, buf3, reinterpret_tensor(buf0, (4, 6144), (6144, 1), 0
), buf2, buf3
class BilinearRankingNew(nn.Module):
def __init__(self, n_classes: 'int'=2, emb_size: 'int'=768, block_size:
'int'=8):
super().__init__()
self.n_classes = n_classes
self.emb_size = emb_size
self.block_size = block_size
self.bilinear = nn.Linear(self.emb_size * self.block_size, self.
n_classes)
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0, input_1):
primals_3 = self.bilinear.weight
primals_4 = self.bilinear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
repo_name: IgnatovFedor/DeepPavlov
module_name: BilinearRanking
synthetic: false
uuid: 9177
licenses: ["Apache-2.0"]
stars: 0
sha: 02ba9c4b2919384c142c170c7f89c65cf05dd426
repo_link: https://github.com/IgnatovFedor/DeepPavlov/tree/02ba9c4b2919384c142c170c7f89c65cf05dd426

entry_point: ConvertPointsFromHomogeneous

original_triton_code:
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/m7/cm7u2xidzfzafcfgds4g3sogyf6hfbowvehkuqc2rzfo2k3uufot.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_1, %slice_2), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
import torch.nn as nn
def convert_points_from_homogeneous(points):
"""Function that converts points from homogeneous to Euclidean space.
See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = tgm.convert_points_from_homogeneous(input) # BxNx2
"""
if not torch.is_tensor(points):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(points)))
if len(points.shape) < 2:
raise ValueError('Input must be at least a 2D tensor. Got {}'.
format(points.shape))
return points[..., :-1] / points[..., -1:]
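# Worked example (illustrative values): the homogeneous point
# (x, y, w) = (2.0, 4.0, 2.0) divides through by its last coordinate,
# giving the Euclidean point (1.0, 2.0):
# >>> convert_points_from_homogeneous(torch.tensor([[2.0, 4.0, 2.0]]))
# tensor([[1., 2.]])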
class ConvertPointsFromHomogeneous(nn.Module):
"""Creates a transformation that converts points from homogeneous to
Euclidean space.
Args:
points (Tensor): tensor of N-dimensional points.
Returns:
Tensor: tensor of N-1-dimensional points.
Shape:
- Input: :math:`(B, D, N)` or :math:`(D, N)`
        - Output: :math:`(B, D, N - 1)` or :math:`(D, N - 1)`
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> transform = tgm.ConvertPointsFromHomogeneous()
>>> output = transform(input) # BxNx2
"""
def __init__(self):
super(ConvertPointsFromHomogeneous, self).__init__()
def forward(self, input):
return convert_points_from_homogeneous(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
triton_code:
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def convert_points_from_homogeneous(points):
"""Function that converts points from homogeneous to Euclidean space.
See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = tgm.convert_points_from_homogeneous(input) # BxNx2
"""
if not torch.is_tensor(points):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(points)))
if len(points.shape) < 2:
raise ValueError('Input must be at least a 2D tensor. Got {}'.
format(points.shape))
return points[..., :-1] / points[..., -1:]
class ConvertPointsFromHomogeneousNew(nn.Module):
"""Creates a transformation that converts points from homogeneous to
Euclidean space.
Args:
points (Tensor): tensor of N-dimensional points.
Returns:
Tensor: tensor of N-1-dimensional points.
Shape:
        - Input: :math:`(B, N, D)` or :math:`(N, D)`
        - Output: :math:`(B, N, D - 1)` or :math:`(N, D - 1)`
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> transform = tgm.ConvertPointsFromHomogeneous()
>>> output = transform(input) # BxNx2
"""
def __init__(self):
super(ConvertPointsFromHomogeneousNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
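# Hedged usage sketch (assumes a CUDA device; call() above asserts the
# exact contiguous (4, 4, 4, 4) shape, and the helper name is illustrative):
# the compiled wrapper should agree elementwise with the eager reference.
def _check_compiled_against_eager():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = convert_points_from_homogeneous(x)
    out = ConvertPointsFromHomogeneousNew()(x)
    assert torch.allclose(out, ref)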
|
JudyYe/frankmocap
|
ConvertPointsFromHomogeneous
| false | 9,178 |
[
"BSD-3-Clause"
] | 0 |
b6e63f344e852ebdbca0095643b5bc0466370891
|
https://github.com/JudyYe/frankmocap/tree/b6e63f344e852ebdbca0095643b5bc0466370891
|
LayerNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/do/cdoahs7c227u5jaalk3qaaedn23dpnuwahibj3cx77ddefse5mx5.py
# Topologically Sorted Source Nodes: [mean, std, sub, add, y, mul, y_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mul => mul
# std => var
# sub => sub
# y => div
# y_1 => add_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1]), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1.0})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %view_1), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_5), kwargs = {})
triton_per_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp31 = tmp29 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, std, sub, add, y, mul, y_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0.run(buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
return (buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-05, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
y = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
y = self.gamma.view(*shape) * y + self.beta.view(*shape)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
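# Hedged check (a minimal sketch on CPU; the helper name is illustrative):
# each sample is flattened, centered by its mean, and scaled by its
# unbiased std plus eps -- the Bessel-corrected statistic that makes the
# fused kernel divide the squared deviations by 63.0 for 64 elements.
def _check_layernorm_stats():
    ln = LayerNorm(num_features=4)
    x = torch.rand(4, 4, 4, 4)
    mean = x.view(4, -1).mean(1).view(4, 1, 1, 1)
    std = x.view(4, -1).std(1).view(4, 1, 1, 1)
    y = ln.gamma.view(1, 4, 1, 1) * ((x - mean) / (std + ln.eps)) \
        + ln.beta.view(1, 4, 1, 1)
    assert torch.allclose(ln(x), y)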
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp31 = tmp29 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(4)](buf1, buf5,
primals_1, primals_2, primals_3, buf6, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1,
1), 0), buf5
class LayerNormNew(nn.Module):
def __init__(self, num_features, eps=1e-05, affine=True):
super(LayerNormNew, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
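# Hedged usage sketch (assumes CUDA and the fixed (4, 4, 4, 4) shape that
# call() asserts; the helper name is illustrative): output[0] is the
# normalized, affine-transformed tensor, while the extra tensors call()
# returns appear to be saved for the backward pass of this '0_forward' graph.
def _run_compiled_layernorm():
    m = LayerNormNew(num_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return m(x)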
|
JieFeng-cse/power-system-rl
|
LayerNorm
| false | 9,179 |
[
"MIT"
] | 0 |
8295d14da83a40c755b8e6a14785c53a238f9a64
|
https://github.com/JieFeng-cse/power-system-rl/tree/8295d14da83a40c755b8e6a14785c53a238f9a64
|
UnbalancedLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5u/c5u7c3xfjaazdtxwasop4c2d3vlwaagqtikdsnpoz56tcxjbvfvx.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.utils.data
class UnbalancedLoss(nn.Module):
NUM_LABELS = 2
def __init__(self):
super().__init__()
self.crit = nn.BCEWithLogitsLoss()
def forward(self, logits, label):
return self.crit(logits, label)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
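# Hedged sketch of the numerically stable identity the fused kernel
# evaluates elementwise before taking the mean (local names only):
#   (1 - y) * x - (min(0, x) - log1p(exp(-|x|)))
#     == max(x, 0) - x * y + log1p(exp(-|x|))
def _check_stable_bce():
    logits = torch.randn(4, 4, 4, 4)
    label = torch.rand(4, 4, 4, 4)
    stable = ((1 - label) * logits
              - (torch.minimum(torch.zeros(()), logits)
                 - torch.log1p(torch.exp(-logits.abs())))).mean()
    assert torch.allclose(stable, UnbalancedLoss()(logits, label))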
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class UnbalancedLossNew(nn.Module):
NUM_LABELS = 2
def __init__(self):
super().__init__()
self.crit = nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
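# Hedged usage sketch (assumes CUDA; call() asserts two contiguous
# (4, 4, 4, 4) tensors, and the helper name is illustrative): a single
# program reduces all 256 elements and returns the mean loss as a
# 0-d tensor.
def _run_compiled_loss():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    return UnbalancedLossNew()(a, b)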
|
Kausta/DeepGlobalRegistration
|
UnbalancedLoss
| false | 9,180 |
[
"MIT"
] | 0 |
4f087d4c775f607e335616e95d8fb28e53d4b823
|
https://github.com/Kausta/DeepGlobalRegistration/tree/4f087d4c775f607e335616e95d8fb28e53d4b823
|
DiceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/i2/ci2unyguiqkqbkmkuenp62eakmjtulk6jn2mbhuhsgwwutfsgvyz.py
# Topologically Sorted Source Nodes: [mul, a, mul_1, b, c, add, add_1, d, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# a => sum_1
# add => add
# add_1 => add_1
# b => sum_2
# c => sum_3
# d => div
# mul => mul
# mul_1 => mul_1
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = tmp8 + tmp11
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tmp13 / tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, a, mul_1, b, c, add, add_1, d, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, eps=1e-06):
super().__init__()
assert isinstance(eps, float)
self.eps = eps
def forward(self, pred, target, mask=None):
pred = pred.contiguous().view(pred.size()[0], -1)
target = target.contiguous().view(target.size()[0], -1)
if mask is not None:
mask = mask.contiguous().view(mask.size()[0], -1)
pred = pred * mask
target = target * mask
a = torch.sum(pred * target)
b = torch.sum(pred)
c = torch.sum(target)
d = 2 * a / (b + c + self.eps)
return 1 - d
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
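# Hedged worked example (a minimal sketch; the helper name is
# illustrative): with pred == target the sums satisfy a == b == c, so
# d = 2a / (2a + eps) is just under 1 and the loss is near 0; fully
# disjoint inputs give a == 0, d == 0, and a loss of exactly 1.
def _check_dice_extremes():
    crit = DiceLoss()
    ones = torch.ones(2, 8)
    zeros = torch.zeros(2, 8)
    assert crit(ones, ones) < 1e-4
    assert abs(crit(ones, zeros) - 1.0) < 1e-6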
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = tmp8 + tmp11
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tmp13 / tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self, eps=1e-06):
super().__init__()
assert isinstance(eps, float)
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
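# Hedged usage sketch (assumes CUDA; the helper name is illustrative).
# Note the compiled call() takes no mask -- the optional mask branch of
# the eager module was traced away for these inputs.
def _run_compiled_dice():
    p = torch.rand(4, 4, 4, 4, device='cuda')
    t = torch.rand(4, 4, 4, 4, device='cuda')
    return DiceLossNew()(p, t)  # 0-d tensor: 1 - 2*sum(p*t)/(sum(p)+sum(t)+eps)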
|
HolyCrap96/mmocr-1
|
DiceLoss
| false | 9,181 |
[
"Apache-2.0"
] | 0 |
c6c4acd39b1c56fec1b87530b2d241fe8af4ceed
|
https://github.com/HolyCrap96/mmocr-1/tree/c6c4acd39b1c56fec1b87530b2d241fe8af4ceed
|
RelPositionMultiHeadedAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/st/cstdkeotatufdikalo45nsknyfku4ofvhlziami6ybawevy72lal.py
# Topologically Sorted Source Nodes: [matrix_ac, matrix_bd], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matrix_ac => clone
# matrix_bd => clone_2
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + (4*y3)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mh/cmhet4vfl4jlxtge4zzaaa2nugvxpr5f4ge7rs72qwe2aow7vxwy.py
# Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matrix_ac => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xp/cxp3ouwpdhdlmipppq44wjaey2obmthzec7uqoddmpoigfmupxdx.py
# Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matrix_bd => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/u3/cu3opkmzrgckmrk64elyf7ml2a2gq2grpcbtiejtq54tloftwrmr.py
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# mask => eq
# Graph fragment:
# %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_eq_3 = async_compile.triton('triton_poi_fused_eq_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/el/celmnq4cmthcneo2qgxp66qy3keynodxqjqu5pn2ewxs4o4pk4pv.py
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# add_2 => add_2
# scores => div
# scores_1 => full_default, where
# softmax => amax, exp, sub, sum_1
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %view_17), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_div_masked_fill_4 = async_compile.triton('triton_poi_fused__softmax_add_div_masked_fill_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_masked_fill_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (4*x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float("-inf")
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = tmp11 * tmp4
tmp13 = tl.where(tmp8, tmp6, tmp12)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp4
tmp20 = tl.where(tmp15, tmp6, tmp19)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 * tmp4
tmp27 = tl.where(tmp22, tmp6, tmp26)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + (x3), tmp28, xmask)
tl.store(out_ptr1 + (x3), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/fn/cfnmi7pncqelvz56byxmokejmd37qckxv3wfecynqkl3e6gmjuq3.py
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax, attn], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# add_2 => add_2
# attn => full_default_1, where_1
# scores => div
# scores_1 => full_default, where
# softmax => div_1, exp, sub
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %view_17), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %div_1), kwargs = {})
triton_poi_fused__softmax_add_div_masked_fill_5 = async_compile.triton('triton_poi_fused__softmax_add_div_masked_fill_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_masked_fill_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 16
x5 = xindex
x6 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x5), xmask)
tmp2 = tl.load(in_ptr1 + (x5), xmask)
tmp8 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x6), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float("-inf")
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = 0.0
tmp14 = tl.where(tmp0, tmp13, tmp12)
tl.store(in_out_ptr0 + (x5), tmp12, xmask)
tl.store(out_ptr0 + (x5), tmp14, xmask)
''', device_str='cuda')
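# Hedged reading of the two kernels above (plain-torch equivalent, local
# names, assuming only what the graph fragments state): the first pass
# computes the row-wise max and the sum of exponentials of the masked
# scores, the second normalizes and zeroes the masked positions again, i.e.
#   scores = scores.masked_fill(mask, float('-inf'))
#   attn = scores.softmax(dim=-1).masked_fill(mask, 0.0)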
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 1), (1, 1))
assert_size_stride(primals_13, (4, 1), (1, 1))
assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3)
del primals_11
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matrix_ac, matrix_bd], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, primals_12, primals_13, buf4, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_12
del primals_13
del primals_3
buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq]
triton_poi_fused_eq_3.run(primals_14, buf10, 64, grid=grid(64), stream=stream0)
del primals_14
buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf3 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_add_div_masked_fill_4.run(buf10, buf6, buf9, buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax, attn], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_add_div_masked_fill_5.run(buf13, buf10, buf9, buf11, buf12, buf14, 256, grid=grid(256), stream=stream0)
del buf9
buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_8, buf15, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16)
buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf16, buf17, 16, 4, grid=grid(16, 4), stream=stream0)
buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf18)
del primals_16
return (reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), )
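# Note: `call` returns the attention output first (viewed back to
# (#batch, time1, n_feat)); the remaining tensors are the graph's secondary
# outputs (reinterpreted inputs, the boolean mask buf10, the pre-mask softmax
# buf13, and the bmm operands), typically the values Inductor keeps around for
# a backward pass. The wrapper class further below uses only output[0].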
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from typing import Optional
from typing import Tuple
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
            1. When applying cross attention between decoder and encoder,
            the batch padding mask for the input is of shape (#batch, 1, T).
            2. When applying self attention of the encoder,
            the mask is of shape (#batch, T, T).
            3. When applying self attention of the decoder,
            the mask is of shape (#batch, L, L).
            4. If different positions in the decoder attend to different
            blocks of the encoder, as in MoChA, the passed-in mask could
            be of shape (#batch, L, T). But there is no such case in
            current WeNet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu: 'bool'=False):
"""Compute relative positinal encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, size).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
        x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb: 'torch.Tensor'
):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
pos_emb (torch.Tensor): Positional embedding tensor
(#batch, time2, size).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
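# In Transformer-XL terms (https://arxiv.org/abs/1901.02860), matrix_ac fuses
# attention terms (a)+(c) and matrix_bd fuses (b)+(d), so the score is
#     score = ((q + u) K^T + (q + v) P^T) / sqrt(d_k)
# with u = pos_bias_u, v = pos_bias_v, and P = linear_pos(pos_emb).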
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
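# A minimal usage sketch for the eager module and harness above (illustrative
# only; `_demo_forward` is a hypothetical helper, not part of the dataset entry):
def _demo_forward():
    init_args, init_kwargs = get_init_inputs()
    mha = RelPositionMultiHeadedAttention(*init_args, **init_kwargs)
    query, key, value, mask, pos_emb = get_inputs()
    return mha(query, key, value, mask, pos_emb)  # -> (4, 4, 4)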
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from typing import Optional
from typing import Tuple
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
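# The three `clone` kernels above materialize the head-split transposes that
# the eager code writes as `.transpose(1, 2)`: clone_0 additionally fuses the
# linear_q bias with the pos_bias_u / pos_bias_v adds (emitting q_with_bias_u
# and q_with_bias_v in a single pass), clone_1 fuses one bias add, and clone_2
# is a pure layout change. A hedged eager sketch of clone_0's arithmetic
# (`_sketch_clone_0` is an illustrative helper, not part of the module):
def _sketch_clone_0(q_proj, bias_q, pos_bias_u, pos_bias_v):
    q = q_proj + bias_q                     # tmp2 = tmp0 + tmp1
    return q + pos_bias_u, q + pos_bias_v   # tmp4 -> buf4, tmp6 -> buf7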
@triton.jit
def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
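# `triton_poi_fused_eq_3` builds the boolean padding mask consumed by the
# masked_fill fusions below: True wherever the incoming float mask equals 0.0,
# matching `mask.unsqueeze(1).eq(0)` in forward_attention.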
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 4 * x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float('-inf')
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = tmp11 * tmp4
tmp13 = tl.where(tmp8, tmp6, tmp12)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp4
tmp20 = tl.where(tmp15, tmp6, tmp19)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 * tmp4
tmp27 = tl.where(tmp22, tmp6, tmp26)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x3, tmp28, xmask)
tl.store(out_ptr1 + x3, tmp39, xmask)
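# Kernel 4 above is the reduction half of the fused masked softmax: for each
# row of four scores it applies the mask as -inf, then computes the row
# maximum (tmp28) and the sum of shifted exponentials (tmp39), i.e. the
# standard numerically stable softmax statistics. A hedged eager sketch
# (`_sketch_softmax_stats` is an illustrative helper):
def _sketch_softmax_stats(scores, mask):
    import torch
    s = scores.masked_fill(mask, float('-inf'))
    row_max = s.max(dim=-1, keepdim=True).values
    row_sum = torch.exp(s - row_max).sum(dim=-1, keepdim=True)
    return row_max, row_sum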
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp2 = tl.load(in_ptr1 + x5, xmask)
tmp8 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float('-inf')
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = 0.0
tmp14 = tl.where(tmp0, tmp13, tmp12)
tl.store(in_out_ptr0 + x5, tmp12, xmask)
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 1), (1, 1))
assert_size_stride(primals_13, (4, 1), (1, 1))
assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3)
del primals_11
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, primals_12,
primals_13, buf4, buf7, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1,
num_stages=1)
del primals_12
del primals_13
del primals_3
buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf5, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_3[grid(64)](primals_14, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_14
buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_div_masked_fill_4[grid(64)](buf10,
buf6, buf9, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_add_div_masked_fill_5[grid(256)](buf13,
buf10, buf9, buf11, buf12, buf14, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf9
buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf12
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf15, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_8
buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16)
buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf11
triton_poi_fused_clone_2[grid(16, 4)](buf16, buf17, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0)
del buf16
extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf18)
del primals_16
return reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0
), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
            1. When applying cross attention between decoder and encoder,
            the batch padding mask for the input is of shape (#batch, 1, T).
            2. When applying self attention of the encoder,
            the mask is of shape (#batch, T, T).
            3. When applying self attention of the decoder,
            the mask is of shape (#batch, L, L).
            4. If different positions in the decoder attend to different
            blocks of the encoder, as in MoChA, the passed-in mask could
            be of shape (#batch, L, T). But there is no such case in
            current WeNet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttentionNew(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu: 'bool'=False):
"""Compute relative positinal encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, size).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
        x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, input_0, input_1, input_2, input_3, input_4):
primals_12 = self.pos_bias_u
primals_13 = self.pos_bias_v
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_11 = self.linear_out.weight
primals_16 = self.linear_out.bias
primals_15 = self.linear_pos.weight
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
primals_14 = input_4
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0]
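# A hedged smoke-test sketch for the compiled wrapper (illustrative only;
# `_smoke_test_compiled` is not part of the dataset entry and assumes a CUDA
# device). It feeds the same positional arguments as the eager forward; the
# traced graph contains no dropout op, so it corresponds to eval-mode behavior.
def _smoke_test_compiled():
    import torch
    m = RelPositionMultiHeadedAttentionNew(4, 4, 0.5).cuda()
    q, k, v, mask, pos = (torch.rand(4, 4, 4, device='cuda') for _ in range(5))
    return m(q, k, v, mask, pos)  # -> (4, 4, 4)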
|
JJoving/wenet
|
RelPositionMultiHeadedAttention
| false | 9182 |
[
"Apache-2.0"
] | 0 |
4a2195744dba43fe4fb9ad8d46a2b90a80dbdc4e
|
https://github.com/JJoving/wenet/tree/4a2195744dba43fe4fb9ad8d46a2b90a80dbdc4e
|
CNN_2
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/up/cupulppb6gntt36vlwmxuai5yj6kvwpdqsf65wjj4wwaeiqznte5.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
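# This prologue kernel (and triton_poi_fused_1..4 below) appears to repack
# OIHW/NCHW-contiguous tensors into a channels-last layout, with the channel
# index (y0) becoming the innermost stride, which Inductor favors for the
# convolutions that follow. A hedged eager analogue (`_sketch_channels_last`
# is an illustrative helper, not part of the generated module):
def _sketch_channels_last(t):
    import torch
    # 4-D NCHW/OIHW contiguous -> channel-innermost memory layout
    return t.to(memory_format=torch.channels_last)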
# kernel path: runs/run_shard_8/inductor_cache/7a/c7af3agsnw76zx5luwpaougjaunb6etthvofkg6f5vde7f7p6vuc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 32768], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 20736
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (20736*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (62208*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xs/cxsyq4kjzmygvpoxfzfw4hzamfkj5tnlg4txbrqbf7aatlejhfwx.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qw/cqwg3xdwx3eugpxibvknyiosqdjdt7h7peu75twynersbsv447ra.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/23/c232cjjrpfm7piga4i2u2f6iwdslqscw63lwt2xylgovf64odkma.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xt/cxtc6zqgekt73ixiwnvvr76pr3libvfegmie7jvp3ypnsbd3lgau.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2508800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
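# The fused epilogue above adds the per-channel convolution bias (x0 indexes
# the 32 channels in the channels-last layout) and applies ReLU in place, so
# no separate bias or activation kernel is launched. A hedged eager sketch
# with the equivalent NCHW broadcast (`_sketch_bias_relu` is illustrative):
def _sketch_bias_relu(conv_out, bias):
    import torch
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))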
# kernel path: runs/run_shard_8/inductor_cache/eo/ceolem5o45bnoel733kkdfinlsav4kvs6l6vm6gkaa25gtvdvxwv.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 627200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32) % 70
x2 = (xindex // 2240)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (8960*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (8960*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (4480 + x0 + (64*x1) + (8960*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (4512 + x0 + (64*x1) + (8960*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
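# The kernel above fuses a 2x2 / stride-2 max pool with index tracking: the
# int8 value in {0, 1, 2, 3} records which window position won (row-major,
# 0 = top-left), the "low memory" form of the indices that the eager analogue
# reports as flat offsets. A hedged sketch (`_sketch_maxpool` is illustrative):
def _sketch_maxpool(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)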
# kernel path: runs/run_shard_8/inductor_cache/dw/cdw27peb3db4ovqkh5ukksc7xn76oee4eav4nngvq227yw4mmprd.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uu/cuuw42yfjgpxfazwuejjlybvnafgaezkfo566fnw4jqojcssrhcz.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 33
x2 = (xindex // 2112)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8448*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8448*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (4224 + x0 + (128*x1) + (8448*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (4288 + x0 + (128*x1) + (8448*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/g4/cg4cglz5uiedjljn6sqse4ihxktsmoenkfys64tfegwxjdepakxz.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_6 => convolution_2
# x_7 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 430592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vo/cvoec6js5hj7hnoaqzdekbehl4726lz67ah5kceaz2lcox2i6hbh.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_8 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 14
x2 = (xindex // 1792) % 14
x3 = (xindex // 25088)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (7424*x2) + (107648*x3)), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (7424*x2) + (107648*x3)), None)
tmp3 = tl.load(in_ptr0 + (3712 + x0 + (256*x1) + (7424*x2) + (107648*x3)), None)
tmp5 = tl.load(in_ptr0 + (3840 + x0 + (256*x1) + (7424*x2) + (107648*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4), tmp6, None)
tl.store(out_ptr1 + (x4), tmp16, None)
''', device_str='cuda')
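# For reference, an eager-mode sketch of the pooling the kernel above fuses
# (the (4, 128, 29, 29) relu output is assumed as input). One difference:
# F.max_pool2d returns flat spatial indices, whereas the kernel stores the
# winning position within each 2x2 window as an int8 offset in {0, 1, 2, 3},
# which is all the backward pass needs.
def _eager_maxpool_with_indices_sketch(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)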
# kernel path: runs/run_shard_8/inductor_cache/qt/cqtwkhdxxapizmlto6ji6wslorphr42sw4ejgqjbijhxbmcgkx5o.py
# Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_10 => relu_3
# x_9 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/wv/cwvw7jm4ntzfbb3diujnt6a5wywbwm3jpfrwagzy5hhywdteznte.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_11 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 100
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = (yindex // 5)
y5 = yindex
y4 = (yindex // 25)
y6 = yindex % 25
tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1280 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1408 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (25*x2) + (3200*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
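# Note on the two stores in the kernel above: out_ptr0 keeps the int8 argmax
# offsets in the channels-last layout used by the surrounding conv kernels
# (x2 + 128 * y5), while out_ptr1 writes the pooled values to a contiguous
# NCHW buffer (y6 + 25 * x2 + 3200 * y4), so the (4, 128, 5, 5) result can be
# reinterpreted as a (1, 12800) matrix for the following mm.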
# kernel path: runs/run_shard_8/inductor_cache/vd/cvdclb6isq6qacdjt5mu3x6xcg6bwxbi355adgpwgpr7g7zruecy.py
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_14 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_13 = async_compile.triton('triton_poi_fused_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
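# Note: fc1's addmm was decomposed into a plain mm (see extern_kernels.mm in
# `call`) plus this pointwise epilogue, which is why the kernel adds the bias
# primals_11 before applying max(0, .).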
# kernel path: runs/run_shard_8/inductor_cache/uy/cuyjh2ef6ogvokz4xoxtwjix6r3r6245oogvhbf4ucrmbbahwuyn.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# x_16 => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_14 = async_compile.triton('triton_per_fused__log_softmax_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_14(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl_math.log(tmp8)
tmp10 = tmp4 - tmp9
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp10, None)
''', device_str='cuda')
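# For reference, an eager-mode sketch of the numerically stable log-softmax
# this persistent reduction implements over the single row of two logits:
# log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))).
def _eager_log_softmax_sketch(logits):
    shifted = logits - logits.max(dim=1, keepdim=True).values
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()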
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 144, 144), (62208, 20736, 144, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (50, 12800), (12800, 1))
assert_size_stride(primals_11, (50, ), (1, ))
assert_size_stride(primals_12, (2, 50), (50, 1))
assert_size_stride(primals_13, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 5, 5), (75, 1, 15, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 96, 25, grid=grid(96, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 144, 144), (62208, 1, 432, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 20736, grid=grid(12, 20736), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 25, grid=grid(2048, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 16384, 25, grid=grid(16384, 25), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 140, 140), (627200, 1, 4480, 32))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 2508800, grid=grid(2508800), stream=stream0)
del primals_2
buf7 = empty_strided_cuda((4, 32, 70, 70), (156800, 1, 2240, 32), torch.float32)
buf8 = empty_strided_cuda((4, 32, 70, 70), (156800, 1, 2240, 32), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_6.run(buf6, buf7, buf8, 627200, grid=grid(627200), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 66, 66), (278784, 1, 4224, 64))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 1115136, grid=grid(1115136), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 64, 33, 33), (69696, 1, 2112, 64), torch.float32)
buf12 = empty_strided_cuda((4, 64, 33, 33), (69696, 1, 2112, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 278784, grid=grid(278784), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 29, 29), (107648, 1, 3712, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf14, primals_7, 430592, grid=grid(430592), stream=stream0)
del primals_7
buf15 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128), torch.float32)
buf16 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128), torch.int8)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf14, buf15, buf16, 100352, grid=grid(100352), stream=stream0)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 10, 10), (12800, 1, 1280, 128))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf18, primals_9, 51200, grid=grid(51200), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8)
buf20 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf18, buf19, buf20, 100, 128, grid=grid(100, 128), stream=stream0)
buf21 = empty_strided_cuda((1, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (1, 12800), (0, 1), 0), reinterpret_tensor(primals_10, (12800, 50), (1, 12800), 0), out=buf21)
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.relu]
triton_poi_fused_relu_13.run(buf22, primals_11, 50, grid=grid(50), stream=stream0)
del primals_11
buf23 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(primals_12, (50, 2), (1, 50), 0), alpha=1, beta=1, out=buf23)
del primals_13
buf26 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_14.run(buf23, buf26, 1, 2, grid=grid(1), stream=stream0)
del buf23
return (buf26, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10, buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor(buf20, (1, 12800), (12800, 1), 0), buf22, buf26, primals_12, primals_10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 144, 144), (62208, 20736, 144, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((50, 12800), (12800, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((2, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN_2(nn.Module):
def __init__(self, input_size, n_feature, output_size):
super(CNN_2, self).__init__()
self.n_feature = n_feature
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
self.conv3 = nn.Conv2d(64, 128, kernel_size=5)
self.conv4 = nn.Conv2d(128, 128, kernel_size=5)
self.fc1 = nn.Linear(128 * 10 * 10, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, x, verbose=False):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
x = self.conv3(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
x = self.conv4(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
x = x.view(-1, 128 * 10 * 10)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 3, 144, 144])]
def get_init_inputs():
return [[], {'input_size': 4, 'n_feature': 4, 'output_size': 4}]
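# Shape note (for the 144x144 sample input above): the conv/pool stages yield
# 144 -> 140 -> 70 -> 66 -> 33 -> 29 -> 14 -> 10 -> 5, so fc1 sees a
# (4, 128, 5, 5) tensor. Because 4 * 128 * 5 * 5 == 128 * 10 * 10 == 12800,
# the view(-1, 12800) collapses the whole batch into a single row; the
# compiled graph reflects this with its (1, 12800) reinterpret_tensor.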
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 20736
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 20736 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 62208 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # all-true mask retained from the template; value unused
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 627200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32 % 70
x2 = xindex // 2240
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 8960 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 8960 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (4480 + x0 + 64 * x1 + 8960 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (4512 + x0 + 64 * x1 + 8960 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 33
x2 = xindex // 2112
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8448 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8448 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (4224 + x0 + 128 * x1 + 8448 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (4288 + x0 + 128 * x1 + 8448 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 430592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 14
x2 = xindex // 1792 % 14
x3 = xindex // 25088
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 7424 * x2 + 107648 * x3), None)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 7424 * x2 + 107648 * x3), None)
    tmp3 = tl.load(in_ptr0 + (3712 + x0 + 256 * x1 + 7424 * x2 + 107648 * x3), None)
    tmp5 = tl.load(in_ptr0 + (3840 + x0 + 256 * x1 + 7424 * x2 + 107648 * x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x4, tmp6, None)
tl.store(out_ptr1 + x4, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 100
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = yindex // 5
y5 = yindex
y4 = yindex // 25
y6 = yindex % 25
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 2560 * y1), xmask & ymask,
eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 2560 * y1),
        xmask & ymask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1280 + x2 + 256 * y0 + 2560 * y1),
        xmask & ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (1408 + x2 + 256 * y0 + 2560 * y1),
        xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 25 * x2 + 3200 * y4), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 50
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_14(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # vestigial index math; xnumel == 1
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # all-true xmask, unused
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # all-true rmask, unused
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl_math.log(tmp8)
tmp10 = tmp4 - tmp9
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp10, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 144, 144), (62208, 20736, 144, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (50, 12800), (12800, 1))
assert_size_stride(primals_11, (50,), (1,))
assert_size_stride(primals_12, (2, 50), (50, 1))
assert_size_stride(primals_13, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 5, 5), (75, 1, 15, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 25)](primals_1, buf0, 96, 25, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 144, 144), (62208, 1, 432, 3),
torch.float32)
triton_poi_fused_1[grid(12, 20736)](primals_3, buf1, 12, 20736,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
        buf2 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
triton_poi_fused_2[grid(2048, 25)](primals_4, buf2, 2048, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 25)](primals_6, buf3, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 25)](primals_8, buf4, 16384, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 140, 140), (627200, 1, 4480, 32))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(2508800)](buf6, primals_2,
2508800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf7 = empty_strided_cuda((4, 32, 70, 70), (156800, 1, 2240, 32),
torch.float32)
buf8 = empty_strided_cuda((4, 32, 70, 70), (156800, 1, 2240, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(627200)](buf6, buf7,
buf8, 627200, XBLOCK=1024, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 66, 66), (278784, 1, 4224, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(1115136)](buf10, primals_5,
1115136, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 64, 33, 33), (69696, 1, 2112, 64),
torch.float32)
buf12 = empty_strided_cuda((4, 64, 33, 33), (69696, 1, 2112, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(278784)](buf10,
buf11, buf12, 278784, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 29, 29), (107648, 1, 3712, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_9[grid(430592)](buf14, primals_7,
430592, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128),
torch.float32)
buf16 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(100352)](buf14,
buf15, buf16, 100352, XBLOCK=512, num_warps=8, num_stages=1)
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 10, 10), (12800, 1, 1280, 128))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_11[grid(51200)](buf18, primals_9,
51200, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128),
torch.int8)
        buf20 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(100, 128)](buf18,
buf19, buf20, 100, 128, XBLOCK=128, YBLOCK=2, num_warps=4,
num_stages=1)
buf21 = empty_strided_cuda((1, 50), (50, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf20, (1, 12800), (0, 1), 0),
            reinterpret_tensor(primals_10, (12800, 50), (1, 12800), 0),
            out=buf21)
buf22 = buf21
del buf21
triton_poi_fused_relu_13[grid(50)](buf22, primals_11, 50, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_11
buf23 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(
primals_12, (50, 2), (1, 50), 0), alpha=1, beta=1, out=buf23)
del primals_13
buf26 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
triton_per_fused__log_softmax_14[grid(1)](buf23, buf26, 1, 2,
XBLOCK=1, num_warps=2, num_stages=1)
del buf23
    return (buf26, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
        buf11, buf12, buf14, buf15, buf16, buf18, buf19,
        reinterpret_tensor(buf20, (1, 12800), (12800, 1), 0), buf22, buf26,
        primals_12, primals_10)
class CNN_2New(nn.Module):
def __init__(self, input_size, n_feature, output_size):
super(CNN_2New, self).__init__()
self.n_feature = n_feature
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
self.conv3 = nn.Conv2d(64, 128, kernel_size=5)
self.conv4 = nn.Conv2d(128, 128, kernel_size=5)
self.fc1 = nn.Linear(128 * 10 * 10, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_12 = self.fc2.weight
primals_13 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
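# Illustrative usage of the compiled module, mirroring get_inputs above
# (requires a CUDA device; constructor arguments follow get_init_inputs):
#
#   model = CNN_2New(input_size=4, n_feature=4, output_size=4).cuda()
#   out = model(torch.rand([4, 3, 144, 144], device='cuda'))  # shape (1, 2)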
|
IbrahimEl-Shal/CatDogClassifier
|
CNN_2
| false | 9,184 |
[
"MIT"
] | 0 |
aa6e73b679a181593f8297726da94b70d3b51407
|
https://github.com/IbrahimEl-Shal/CatDogClassifier/tree/aa6e73b679a181593f8297726da94b70d3b51407
|
GeneralizedDiceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/d5/cd52mlrkv7fg7ptxw5vaz664ocriv2bgu4unk4oe24v6nbgmv7uv.py
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# ground_o => sum_2
# intersection => sum_1
# mul => mul
# pred_o => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2, 3]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
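# For reference, the eager-mode equivalents of the three reductions fused
# above, with placeholder argument names (each sum is taken per (batch,
# channel) pair over the spatial axes of the 4x4x4x4 inputs):
def _eager_dice_terms_sketch(pred, ground):
    intersection = (pred * ground).sum(dim=[2, 3])
    ground_o = ground.sum(dim=[2, 3])
    pred_o = pred.sum(dim=[2, 3])
    return intersection, ground_o, pred_o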
# kernel path: runs/run_shard_8/inductor_cache/6l/c6l2gkrph5u6qhzfpwcqqfu277drwwd7ra3dyhys4nf64xwbbpre.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%select, [%isinf], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = libdevice.isinf(tmp3).to(tl.int1)
tmp5 = 0.0
tmp6 = tl.where(tmp4, tmp5, tmp3)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
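# For reference, an eager-mode sketch of the class-weight computation above
# (applied here to the first batch row): w = 1 / ground_o**2, with infinite
# weights arising from empty classes zeroed out.
def _eager_dice_weight_sketch(ground_o_row):
    import torch
    w = torch.reciprocal(ground_o_row * ground_o_row)
    w[torch.isinf(w)] = 0.0
    return w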
# kernel path: runs/run_shard_8/inductor_cache/cy/ccy7idfxu272yvofjbbp54inr6a5zn7jkvgbq7732tfnhhq2lmr7.py
# Topologically Sorted Source Nodes: [mul_1, w], Original ATen: [aten.mul, aten.reciprocal]
# Source node to ATen node mapping:
# mul_1 => mul_1
# w => reciprocal
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %sum_2), kwargs = {})
# %reciprocal : [num_users=2] = call_function[target=torch.ops.aten.reciprocal.default](args = (%mul_1,), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%reciprocal, %index_put, 0, 0), kwargs = {})
triton_poi_fused_mul_reciprocal_2 = async_compile.triton('triton_poi_fused_mul_reciprocal_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reciprocal_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp4 * tmp4
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = tl.where(tmp2, tmp3, tmp7)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/jk/cjkuxs5m7ejvswg7hrk2llstygqftgn2i6wdk7h25jxqaj665z6b.py
# Topologically Sorted Source Nodes: [max_1, setitem_1], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_1 => max_1
# setitem_1 => index_put_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_4,), kwargs = {})
# %index_put_1 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_4, [%isinf], %max_1), kwargs = {})
triton_per_fused_index_put_max_3 = async_compile.triton('triton_per_fused_index_put_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_3', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_3(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp0 = tl.full([1, 1], 0, tl.int32)
tmp1 = tmp0 == tmp0
tmp4 = tmp3 * tmp3
tmp5 = tl.full([1, 1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tl.where(tmp1, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = triton_helpers.max2(tmp8, 1)[:, None]
tmp11 = libdevice.isinf(tmp6).to(tl.int1)
tmp12 = tl.where(tmp11, tmp10, tmp7)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
''', device_str='cuda')
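# For reference, an eager-mode sketch of the per-row infinity handling that
# the two kernels above implement for row 0 (w_row is assumed to be one batch
# row of weights): infinities are first zeroed, so the max is taken over
# finite entries only, and are then replaced by that max.
def _eager_inf_fill_sketch(w_row):
    import torch
    infs = torch.isinf(w_row)
    w_row[infs] = 0.0
    w_row[infs] = w_row.max()
    return w_row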
# kernel path: runs/run_shard_8/inductor_cache/4q/c4ql4xyg2xlaa3uezk7gjgywmt3fw3tynazq4ckpuwcsgzw6zhei.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %index_put_1, 0, 0), kwargs = {})
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/o5/co5ocweef7wvpqwak7rmrihdxwrqz6mechmdzkcte5vjlsv5tdhx.py
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_2 => full_default_1, index_put_2
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_2 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_6, [%isinf_1], %full_default_1), kwargs = {})
triton_poi_fused_index_put_lift_fresh_5 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_5', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_5(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp0 = tl.full([1], 1, tl.int32)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (4 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/yv/cyvyap3cqstn6kbri4i6qtdbvfru2ywjrkts4h2byghwjatyop4k.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %index_put_2, 0, 1), kwargs = {})
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tg/ctgfsxxk2urovszhep4mbnqcv7roo2a5o633p6v6krrzhegbz6tr.py
# Topologically Sorted Source Nodes: [max_2, setitem_3], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_2 => max_2
# setitem_3 => index_put_3
# Graph fragment:
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_7,), kwargs = {})
# %index_put_3 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_7, [%isinf_1], %max_2), kwargs = {})
triton_per_fused_index_put_max_7 = async_compile.triton('triton_per_fused_index_put_max_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_7', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_7(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (4 + r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(4 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/fy/cfy4vgr7piqugeft2crnhfbw5ielxsgean22zjdvpklq4txeer5o.py
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_4 => full_default_2, index_put_4
# Graph fragment:
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_4 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_9, [%isinf_2], %full_default_2), kwargs = {})
triton_poi_fused_index_put_lift_fresh_8 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_8', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_8(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int32)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (8 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/zj/czjcmriyujs5brmyh7stjrkfk37e4twzbljor3wofssbrxsuw63t.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %index_put_4, 0, 2), kwargs = {})
triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hx/chxbr5aueebc6hjsn4ej4rftudtvp5pbxyu5cbojq5zy2lksc2pa.py
# Topologically Sorted Source Nodes: [max_3, setitem_5], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_3 => max_3
# setitem_5 => index_put_5
# Graph fragment:
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_10,), kwargs = {})
# %index_put_5 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_10, [%isinf_2], %max_3), kwargs = {})
triton_per_fused_index_put_max_10 = async_compile.triton('triton_per_fused_index_put_max_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_10', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_10(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (4 + r0), None)
tmp10 = tl.load(in_ptr1 + (8 + r0), None)
tmp0 = tl.full([1, 1], 2, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(8 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/wq/cwqjjijigoiri7ebhb452bt4tizigzg2o6ji6lsekghkviga7xp3.py
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_6 => full_default_3, index_put_6
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_6 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_12, [%isinf_3], %full_default_3), kwargs = {})
triton_poi_fused_index_put_lift_fresh_11 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_11', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_11(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp0 = tl.full([1], 3, tl.int32)
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (12 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ux/cuxhakfkc47pmz4gov3quzmpxdbvtixikhnf3nicsxfr6rvsdnli.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %index_put_6, 0, 3), kwargs = {})
triton_poi_fused_12 = async_compile.triton('triton_poi_fused_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/lf/clf4pxfzowgmhow2bm4nhu2drefrnqqu6uqv5egtoum633aeciwm.py
# Topologically Sorted Source Nodes: [max_4, setitem_7], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_4 => max_4
# setitem_7 => index_put_7
# Graph fragment:
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_13,), kwargs = {})
# %index_put_7 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_13, [%isinf_3], %max_4), kwargs = {})
triton_per_fused_index_put_max_13 = async_compile.triton('triton_per_fused_index_put_max_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_13', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_13(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + (8 + r0), None)
tmp10 = tl.load(in_ptr1 + (12 + r0), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 2, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/s2/cs2wyfvtdb7vmxx7u5otf7upxsggzrwonnusuhl3h5txytfu73v3.py
# Topologically Sorted Source Nodes: [mul_2, sum_4, mul_3, add_1, denominator, mul_4, sum_5, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# denominator => add
# f => sub
# f_1 => mean
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# sum_4 => sum_4
# sum_5 => sum_5
# truediv => div
# Graph fragment:
# %select_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_6, %index_put_7, 0, 3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %select_scatter_default_7), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1e-05), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %select_scatter_default_7), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sum_14 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (12))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (14))
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (15))
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp26 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (4*r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp1 = r0
tmp2 = tl.full([1, 1], 3, tl.int32)
tmp3 = tmp1 == tmp2
tmp7 = tl.where(tmp3, tmp5, tmp6)
tmp8 = tmp0 * tmp7
tmp13 = tl.where(tmp3, tmp11, tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp8 + tmp14
tmp20 = tl.where(tmp3, tmp18, tmp19)
tmp21 = tmp16 * tmp20
tmp22 = tmp15 + tmp21
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tmp23 * tmp27
tmp29 = tmp22 + tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp7
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp13
tmp38 = tmp33 + tmp37
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp20
tmp43 = tmp38 + tmp42
tmp46 = tmp44 + tmp45
tmp47 = tmp46 * tmp27
tmp48 = tmp43 + tmp47
tmp49 = 2.0
tmp50 = tmp29 * tmp49
tmp51 = 1e-05
tmp52 = tmp50 + tmp51
tmp53 = tmp48 + tmp51
tmp54 = tmp52 / tmp53
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp61, None)
''', device_str='cuda')
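# The final fused kernel evaluates the loss itself, matching the graph
# fragment above:
#   f = mean(1 - (2 * sum(intersection * w) + 1e-05)
#                / (sum((ground_o + pred_o) * w) + 1e-05))
# reduced over the four batch rows.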
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf29, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_1.run(buf1, buf2, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, w], Original ATen: [aten.mul, aten.reciprocal]
triton_poi_fused_mul_reciprocal_2.run(buf2, buf1, buf4, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_1, setitem_1], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_3.run(buf2, buf1, buf4, 1, 4, grid=grid(1), stream=stream0)
del buf2
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(buf4, buf7, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_5.run(buf4, buf7, 4, grid=grid(4), stream=stream0)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(buf7, buf11, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_2, setitem_3], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_7.run(buf7, buf4, buf11, 1, 4, grid=grid(1), stream=stream0)
buf14 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(buf11, buf14, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_8.run(buf11, buf14, 4, grid=grid(4), stream=stream0)
buf18 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf14, buf18, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_3, setitem_5], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_10.run(buf14, buf11, buf18, 1, 4, grid=grid(1), stream=stream0)
buf21 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf18, buf21, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_11.run(buf18, buf21, 4, grid=grid(4), stream=stream0)
buf25 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_12.run(buf21, buf25, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_4, setitem_7], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_13.run(buf21, buf18, buf25, 1, 4, grid=grid(1), stream=stream0)
del buf18
del buf21
buf31 = empty_strided_cuda((), (), torch.float32)
buf32 = buf31; del buf31 # reuse
# Topologically Sorted Source Nodes: [mul_2, sum_4, mul_3, add_1, denominator, mul_4, sum_5, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
triton_per_fused_add_div_mean_mul_rsub_sum_14.run(buf32, buf0, buf25, buf1, buf29, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf25
del buf29
return (buf32, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import warnings
import numpy as np
from typing import Callable
from torch.nn.modules.loss import _Loss
def one_hot(labels, num_classes):
"""
Converts label image `labels` to a one-hot vector with `num_classes` number of channels as last dimension.
"""
labels = labels % num_classes
y = np.eye(num_classes)
onehot = y[labels.flatten()]
    return onehot.reshape(tuple(labels.shape) + (num_classes,)).astype(labels.dtype)
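# Minimal usage sketch (illustrative only, not part of the original module):
# a 2x2 integer label map with three classes becomes a (2, 2, 3) one-hot array.
def _one_hot_example():
    lbl = np.array([[0, 1], [2, 1]])
    return one_hot(lbl, 3)  # shape (2, 2, 3), dtype matches lbl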
class GeneralizedDiceLoss(_Loss):
"""
Compute the generalised Dice loss defined in:
    Sudre, C. et al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, w_type: 'str'
='square', reduction: 'str'='mean'):
"""
Args:
            include_background: If False, channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: If True, apply a sigmoid function to the prediction.
softmax: If True, apply a softmax function to the prediction.
w_type ('square'|'simple'|'uniform'): type of function to transform ground truth volume to a weight factor.
Default: `'square'`
reduction (`none|mean|sum`): Specifies the reduction to apply to the output:
``'none'``: no reduction will be applied,
                ``'mean'``: the output will be averaged over the batch dim,
``'sum'``: the output will be summed over the batch dim.
Default: ``'mean'``.
"""
super().__init__(reduction=reduction)
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
f'reduction={reduction} is invalid. Valid options are: none, mean or sum.'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
if sigmoid and softmax:
raise ValueError(
'sigmoid=True and softmax=True are not compatible.')
self.sigmoid = sigmoid
self.softmax = softmax
self.w_func: 'Callable' = torch.ones_like
if w_type == 'simple':
self.w_func = torch.reciprocal
elif w_type == 'square':
self.w_func = lambda x: torch.reciprocal(x * x)
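        # w_type maps each class's ground-truth volume g to a weight:
        # 'uniform' -> 1, 'simple' -> 1/g, 'square' -> 1/(g*g) (the default).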
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', smooth:
'float'=1e-05):
"""
Args:
input (tensor): the shape should be BNH[WD].
target (tensor): the shape should be BNH[WD].
smooth: a small constant to avoid nan.
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if n_pred_ch == 1:
if self.softmax:
warnings.warn(
'single channel prediction, `softmax=True` ignored.')
if self.to_onehot_y:
warnings.warn(
'single channel prediction, `to_onehot_y=True` ignored.')
if not self.include_background:
warnings.warn(
'single channel prediction, `include_background=False` ignored.'
)
else:
if self.softmax:
input = torch.softmax(input, 1)
if self.to_onehot_y:
target = one_hot(target, n_pred_ch)
if not self.include_background:
target = target[:, 1:]
input = input[:, 1:]
assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})'
reduce_axis = list(range(2, len(input.shape)))
intersection = torch.sum(target * input, reduce_axis)
ground_o = torch.sum(target, reduce_axis)
pred_o = torch.sum(input, reduce_axis)
denominator = ground_o + pred_o
w = self.w_func(ground_o.float())
        for b in w:
            infs = torch.isinf(b)
            # zero the infinite weights first so the torch.max below ignores
            # them, then overwrite those positions with the row's largest
            # finite weight
            b[infs] = 0.0
            b[infs] = torch.max(b)
        f = 1.0 - (2.0 * (intersection * w).sum(1) + smooth) / (
            (denominator * w).sum(1) + smooth)
if self.reduction == 'mean':
f = torch.mean(f)
elif self.reduction == 'sum':
f = torch.sum(f)
elif self.reduction == 'none':
pass
else:
raise ValueError(f'reduction={self.reduction} is invalid.')
return f
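# Minimal usage sketch (illustrative only; shapes follow get_inputs() below):
def _generalized_dice_loss_example():
    loss_fn = GeneralizedDiceLoss(w_type='square', reduction='mean')
    pred = torch.rand(4, 4, 4, 4)
    target = torch.rand(4, 4, 4, 4)
    return loss_fn(pred, target)  # scalar tensor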
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from typing import Callable
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
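# The kernel above computes, for each of the 16 (batch, channel) pairs, the
# three spatial reductions intersection = sum(target * input),
# ground_o = sum(target) and pred_o = sum(input).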
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = libdevice.isinf(tmp3).to(tl.int1)
tmp5 = 0.0
tmp6 = tl.where(tmp4, tmp5, tmp3)
tl.store(out_ptr0 + x0, tmp6, xmask)
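# The weight rows of w = 1/(ground_o**2) are handled one at a time: the
# *_lift_fresh_* kernels zero the +/-inf entries of a row and the
# *_index_put_max_* kernels then overwrite those entries with the row max,
# mirroring the per-row inf handling in GeneralizedDiceLoss.forward.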
@triton.jit
def triton_poi_fused_mul_reciprocal_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp4 * tmp4
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = tl.where(tmp2, tmp3, tmp7)
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_index_put_max_3(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp0 = tl.full([1, 1], 0, tl.int32)
tmp1 = tmp0 == tmp0
tmp4 = tmp3 * tmp3
tmp5 = tl.full([1, 1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tl.where(tmp1, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = triton_helpers.max2(tmp8, 1)[:, None]
tmp11 = libdevice.isinf(tmp6).to(tl.int1)
tmp12 = tl.where(tmp11, tmp10, tmp7)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_5(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp0 = tl.full([1], 1, tl.int32)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (4 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_7(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (4 + r0), None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(4 + r0, [XBLOCK, RBLOCK]), tmp13, None)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_8(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int32)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (8 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_10(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (4 + r0), None)
tmp10 = tl.load(in_ptr1 + (8 + r0), None)
tmp0 = tl.full([1, 1], 2, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(8 + r0, [XBLOCK, RBLOCK]), tmp13, None)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_11(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp0 = tl.full([1], 3, tl.int32)
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (12 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_13(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + (8 + r0), None)
tmp10 = tl.load(in_ptr1 + (12 + r0), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 2, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
    tl.store(out_ptr2 + tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK]), tmp13, None)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_14(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 12)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 13)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + 14)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + 15)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp26 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = r0
tmp2 = tl.full([1, 1], 3, tl.int32)
tmp3 = tmp1 == tmp2
tmp7 = tl.where(tmp3, tmp5, tmp6)
tmp8 = tmp0 * tmp7
tmp13 = tl.where(tmp3, tmp11, tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp8 + tmp14
tmp20 = tl.where(tmp3, tmp18, tmp19)
tmp21 = tmp16 * tmp20
tmp22 = tmp15 + tmp21
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tmp23 * tmp27
tmp29 = tmp22 + tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp7
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp13
tmp38 = tmp33 + tmp37
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp20
tmp43 = tmp38 + tmp42
tmp46 = tmp44 + tmp45
tmp47 = tmp46 * tmp27
tmp48 = tmp43 + tmp47
tmp49 = 2.0
tmp50 = tmp29 * tmp49
tmp51 = 1e-05
tmp52 = tmp50 + tmp51
tmp53 = tmp48 + tmp51
tmp54 = tmp52 / tmp53
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp61, None)
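# This last kernel reduces everything to the scalar loss
# f = mean(1 - (2 * sum(intersection * w) + 1e-05)
#              / (sum((ground_o + pred_o) * w) + 1e-05)).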
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(16)](arg1_1, arg0_1, buf0, buf1,
buf29, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_index_put_lift_fresh_1[grid(4)](buf1, buf2, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_reciprocal_2[grid(16)](buf2, buf1, buf4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
triton_per_fused_index_put_max_3[grid(1)](buf2, buf1, buf4, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del buf2
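        # From here the 4x4 scratch buffers are rotated (buf14 reuses buf7,
        # buf18 reuses buf4, and so on), so only a couple of allocations stay
        # live while the four weight rows are processed in turn.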
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_4[grid(16)](buf4, buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
triton_poi_fused_index_put_lift_fresh_5[grid(4)](buf4, buf7, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_6[grid(16)](buf7, buf11, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
triton_per_fused_index_put_max_7[grid(1)](buf7, buf4, buf11, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf14 = buf7
del buf7
        triton_poi_fused_6[grid(16)](buf11, buf14, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
triton_poi_fused_index_put_lift_fresh_8[grid(4)](buf11, buf14, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf18 = buf4
del buf4
        triton_poi_fused_9[grid(16)](buf14, buf18, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
triton_per_fused_index_put_max_10[grid(1)](buf14, buf11, buf18, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
buf21 = buf14
del buf14
        triton_poi_fused_9[grid(16)](buf18, buf21, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
triton_poi_fused_index_put_lift_fresh_11[grid(4)](buf18, buf21, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf25 = buf11
del buf11
triton_poi_fused_12[grid(16)](buf21, buf25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
triton_per_fused_index_put_max_13[grid(1)](buf21, buf18, buf25, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf18
del buf21
buf31 = empty_strided_cuda((), (), torch.float32)
buf32 = buf31
del buf31
triton_per_fused_add_div_mean_mul_rsub_sum_14[grid(1)](buf32, buf0,
buf25, buf1, buf29, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf25
del buf29
return buf32,
def one_hot(labels, num_classes):
"""
Converts label image `labels` to a one-hot vector with `num_classes` number of channels as last dimension.
"""
labels = labels % num_classes
y = np.eye(num_classes)
onehot = y[labels.flatten()]
    return onehot.reshape(tuple(labels.shape) + (num_classes,)).astype(labels.dtype)
class GeneralizedDiceLossNew(_Loss):
"""
Compute the generalised Dice loss defined in:
    Sudre, C. et al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, w_type: 'str'
='square', reduction: 'str'='mean'):
"""
Args:
            include_background: If False, channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: If True, apply a sigmoid function to the prediction.
softmax: If True, apply a softmax function to the prediction.
w_type ('square'|'simple'|'uniform'): type of function to transform ground truth volume to a weight factor.
Default: `'square'`
reduction (`none|mean|sum`): Specifies the reduction to apply to the output:
``'none'``: no reduction will be applied,
                ``'mean'``: the output will be averaged over the batch dim,
``'sum'``: the output will be summed over the batch dim.
Default: ``'mean'``.
"""
super().__init__(reduction=reduction)
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
f'reduction={reduction} is invalid. Valid options are: none, mean or sum.'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
if sigmoid and softmax:
raise ValueError(
'sigmoid=True and softmax=True are not compatible.')
self.sigmoid = sigmoid
self.softmax = softmax
self.w_func: 'Callable' = torch.ones_like
if w_type == 'simple':
self.w_func = torch.reciprocal
elif w_type == 'square':
self.w_func = lambda x: torch.reciprocal(x * x)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
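# A hedged usage sketch (the input shapes here are an assumption; the compiled
# `call` above was traced for fixed CUDA input sizes, so other shapes would
# trip the assert_size_stride guards):
#
#   >>> loss_fn = GeneralizedDiceLossNew(sigmoid=True)
#   >>> pred = torch.rand(4, 4, 4, 4, device='cuda')
#   >>> target = torch.rand(4, 4, 4, 4, device='cuda')
#   >>> loss_fn(pred, target)      # scalar tensor (reduction='mean')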
|
JanSellner/MONAI
|
GeneralizedDiceLoss
| false | 9,185 |
[
"Apache-2.0"
] | 0 |
ff8fa2bae94914030abb1bc0680417fdaa74afd8
|
https://github.com/JanSellner/MONAI/tree/ff8fa2bae94914030abb1bc0680417fdaa74afd8
|
Critic
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5r/c5rx7e67so2ghjdm63ymrwubp7slgir3fb5you4pgmqjrbeq7xma.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_3 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_6], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.load(in_ptr2 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 * tmp8
tmp10 = tl.load(in_ptr3 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 * tmp10
tmp12 = tl.load(in_ptr4 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = tmp0 >= tmp3
tmp19 = tl.full([1], 8, tl.int64)
tmp20 = tmp0 < tmp19
tmp21 = tl.load(in_ptr5 + ((4*x1) + ((-4) + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp4, tmp17, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/25/c25dp2smzhhl7kgpiwol4lukikso4pgqfawwgfij7ozpnb3zn57x.py
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.native_layer_norm, aten.relu]
# Source node to ATen node mapping:
# x_5 => add_2, add_3, mul_2, mul_3, rsqrt_1, sub_1, var_mean_1
# x_6 => relu_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_9), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_10), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_3,), kwargs = {})
triton_poi_fused_native_layer_norm_relu_2 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (1, 4), (4, 1))
assert_size_stride(primals_12, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf0, buf1, buf2, primals_4, primals_5, primals_6, buf3, 32, grid=grid(32), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = buf2; del buf2 # reuse
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf4, buf5, buf6, 4, grid=grid(4), stream=stream0)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.native_layer_norm, aten.relu]
triton_poi_fused_native_layer_norm_relu_2.run(buf4, buf5, buf6, primals_9, primals_10, buf7, 16, grid=grid(16), stream=stream0)
del buf5
del primals_10
buf9 = reinterpret_tensor(buf6, (4, 1), (1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [V], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, buf7, reinterpret_tensor(primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_12
return (buf9, primals_1, primals_4, primals_5, primals_9, buf0, buf3, buf4, buf7, primals_11, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, hidden_size, num_inputs, action_space):
super(Critic, self).__init__()
self.action_space = action_space
num_outputs = action_space.shape[0]
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size + num_outputs, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.V = nn.Linear(hidden_size, 1)
self.V.weight.data.mul_(0.1)
self.V.bias.data.mul_(0.1)
def forward(self, inputs, actions):
x = inputs
x = self.linear1(x)
x = self.ln1(x)
x = F.relu(x)
x = torch.cat((x, actions), 1)
x = self.linear2(x)
x = self.ln2(x)
x = F.relu(x)
V = self.V(x)
return V
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
    return [[], {'hidden_size': 4, 'num_inputs': 4, 'action_space': torch.rand([4, 4])}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
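# The kernel above computes the LayerNorm statistics for each of the 4 rows
# over a hard-coded feature width of 4: out_ptr0 receives the row mean and
# out_ptr1 the reciprocal standard deviation rsqrt(var + 1e-05). The actual
# normalization and affine transform are fused into the kernels that follow.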
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tmp7 * tmp8
tmp10 = tl.load(in_ptr3 + x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 * tmp10
tmp12 = tl.load(in_ptr4 + x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp21 = tl.load(in_ptr5 + (4 * x1 + (-4 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp4, tmp17, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
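# The kernel above fuses the tail of the first LayerNorm (scale and shift),
# the ReLU, and torch.cat((x, actions), 1): lanes with x0 < 4 normalize and
# activate the linear1 output, while lanes with x0 >= 4 copy the action
# tensor into the second half of the 8-wide output row.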
@triton.jit
def triton_poi_fused_native_layer_norm_relu_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x2, tmp10, xmask)
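# The kernel above applies the second LayerNorm and the ReLU in one pass:
# (x - mean) * rsqrt(var + eps) * weight + bias, clamped at zero.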
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (1, 4), (4, 1))
assert_size_stride(primals_12, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](buf0, buf1, buf2, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](buf0, buf1, buf2, primals_4,
primals_5, primals_6, buf3, 32, XBLOCK=32, num_warps=1,
num_stages=1)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = buf2
del buf2
buf6 = buf1
del buf1
triton_poi_fused_native_layer_norm_0[grid(4)](buf4, buf5, buf6, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_2[grid(16)](buf4, buf5,
buf6, primals_9, primals_10, buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf5
del primals_10
buf9 = reinterpret_tensor(buf6, (4, 1), (1, 1), 0)
del buf6
extern_kernels.addmm(primals_12, buf7, reinterpret_tensor(
primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_12
return (buf9, primals_1, primals_4, primals_5, primals_9, buf0, buf3,
buf4, buf7, primals_11, primals_7)
class CriticNew(nn.Module):
def __init__(self, hidden_size, num_inputs, action_space):
super(CriticNew, self).__init__()
self.action_space = action_space
num_outputs = action_space.shape[0]
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size + num_outputs, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.V = nn.Linear(hidden_size, 1)
self.V.weight.data.mul_(0.1)
self.V.bias.data.mul_(0.1)
def forward(self, input_0, input_1):
primals_1 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.ln1.weight
primals_5 = self.ln1.bias
primals_7 = self.linear2.weight
primals_8 = self.linear2.bias
primals_9 = self.ln2.weight
primals_10 = self.ln2.bias
primals_11 = self.V.weight
primals_12 = self.V.bias
primals_2 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
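# A hedged usage sketch (assumes a CUDA device; the traced kernels expect the
# 4x4 shapes from get_inputs() in the reference module above):
#
#   >>> critic = CriticNew(hidden_size=4, num_inputs=4,
#   ...                    action_space=torch.rand(4, 4)).cuda()
#   >>> state = torch.rand(4, 4, device='cuda')
#   >>> action = torch.rand(4, 4, device='cuda')
#   >>> critic(state, action).shape
#   torch.Size([4, 1])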
|
JieFeng-cse/power-system-rl
|
Critic
| false | 9,186 |
[
"MIT"
] | 0 |
8295d14da83a40c755b8e6a14785c53a238f9a64
|
https://github.com/JieFeng-cse/power-system-rl/tree/8295d14da83a40c755b8e6a14785c53a238f9a64
|
QNetwork
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf5, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=256,
fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
"""*** YOUR CODE HERE ***"""
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
def forward(self, state):
"""Build a network that maps state -> action values."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return self.fc3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
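# A hedged usage sketch (shapes follow get_inputs(); nn.Linear is applied over
# the trailing dimension, so a 4x4x4x4 input maps to a 4x4x4x4 Q-value tensor):
#
#   >>> net = QNetwork(state_size=4, action_size=4, seed=4)
#   >>> net(torch.rand(4, 4, 4, 4)).shape
#   torch.Size([4, 4, 4, 4])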
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
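# Both kernels above fuse the linear bias add with the ReLU and, as a
# by-product, store the boolean mask (activation <= 0) that autograd's
# threshold_backward uses to zero the incoming gradient in the backward pass.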
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf6, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
        reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
        primals_6, buf5, primals_4, buf6)
class QNetworkNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=256,
fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(QNetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
"""*** YOUR CODE HERE ***"""
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
KailinTong/my-deep-reinforcement-learning
|
QNetwork
| false | 9,188 |
[
"MIT"
] | 0 |
2b284ff9475965303a1c9906c5666064229a90f1
|
https://github.com/KailinTong/my-deep-reinforcement-learning/tree/2b284ff9475965303a1c9906c5666064229a90f1
|
ScaledDotProductAttention
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/db/cdbvpwij3es3hvx6e56eufnhc2ark7ffetlr353jjxbefbpe6ws4.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 4), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf1)
del arg1_1
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
"""Scaled Dot-Product Attention Module. This code is adopted from
https://github.com/jadore801120/attention-is-all-you-need-pytorch.
Args:
temperature (float): The scale factor for softmax input.
attn_dropout (float): Dropout layer on attn_output_weights.
"""
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, float('-inf'))
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
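# A hedged usage sketch for the reference module (shapes follow get_inputs();
# temperature=4 is what bakes the constant 0.25 scale into the compiled
# division kernel below; .eval() disables dropout so attention rows sum to 1):
#
#   >>> attn_mod = ScaledDotProductAttention(temperature=4).eval()
#   >>> q = k = v = torch.rand(4, 4, 4, 4)
#   >>> output, attn = attn_mod(q, k, v)
#   >>> output.shape, attn.sum(-1).allclose(torch.ones(4, 4, 4))
#   (torch.Size([4, 4, 4, 4]), True)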
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
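# Softmax over the last dimension (size 4) is split across the two kernels
# above: the first subtracts the per-row max and exponentiates for numerical
# stability, the second divides each exponential by the row sum.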
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf1)
del arg1_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3
class ScaledDotProductAttentionNew(nn.Module):
"""Scaled Dot-Product Attention Module. This code is adopted from
https://github.com/jadore801120/attention-is-all-you-need-pytorch.
Args:
temperature (float): The scale factor for softmax input.
attn_dropout (float): Dropout layer on attn_output_weights.
"""
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
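# Note: the compiled path above was traced for inference without a mask, so
# self.dropout and the masked_fill branch of the reference forward are not
# represented in the generated kernels; outputs match the reference module in
# eval mode with mask=None.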
|
HolyCrap96/mmocr-1
|
ScaledDotProductAttention
| false | 9,190 |
[
"Apache-2.0"
] | 0 |
c6c4acd39b1c56fec1b87530b2d241fe8af4ceed
|
https://github.com/HolyCrap96/mmocr-1/tree/c6c4acd39b1c56fec1b87530b2d241fe8af4ceed
|
Actor
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6e/c6etrnrwnqlbwohg25wyqa2rkojloi6lu4ex24dati4k5ok6ywe4.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_layer_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# x_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_native_layer_norm_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + (x2), tmp10, xmask)
tl.store(out_ptr1 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7t/c7t6foqk4l4sxsc5fbssoo6uzou3tt63fxze34z3lo2nmbhpdmyn.py
# Topologically Sorted Source Nodes: [tanh, mu_1], Original ATen: [aten.tanh, aten.mul]
# Source node to ATen node mapping:
# mu_1 => mul_4
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 0.3), kwargs = {})
triton_poi_fused_mul_tanh_2 = async_compile.triton('triton_poi_fused_mul_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 0.3
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_layer_norm, aten.relu, aten.threshold_backward]
triton_poi_fused_native_layer_norm_relu_threshold_backward_1.run(buf0, buf1, buf2, primals_4, primals_5, buf3, buf11, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = buf2; del buf2 # reuse
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf4, buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.native_layer_norm, aten.relu, aten.threshold_backward]
triton_poi_fused_native_layer_norm_relu_threshold_backward_1.run(buf4, buf5, buf6, primals_8, primals_9, buf7, buf10, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_11
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, mu_1], Original ATen: [aten.tanh, aten.mul]
triton_poi_fused_mul_tanh_2.run(buf8, buf9, 256, grid=grid(256), stream=stream0)
return (buf9, primals_4, primals_8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, primals_10, buf10, primals_6, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, hidden_size, num_inputs, action_space):
super(Actor, self).__init__()
self.action_space = action_space
num_outputs = action_space.shape[0]
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.mu = nn.Linear(hidden_size, num_outputs)
self.mu.weight.data.mul_(0.01)
self.mu.bias.data.mul_(0.01)
def forward(self, inputs):
x = inputs
x = self.linear1(x)
x = self.ln1(x)
x = F.relu(x)
x = self.linear2(x)
x = self.ln2(x)
x = F.relu(x)
mu = self.mu(x)
mu = torch.tanh(mu) * 0.3
        assert mu.shape[:-1] == inputs.shape[:-1], 'shape mismatch'
return mu
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_inputs': 4, 'action_space': torch.
rand([4, 4])}]
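# A minimal usage sketch (illustrative; the tensor standing in for
# action_space only needs a .shape[0], so a real gym Box space would work
# the same way):
#
#   actor = Actor(hidden_size=4, num_inputs=4, action_space=torch.rand(4, 4))
#   mu = actor(torch.rand(4, 4, 4, 4))   # bounded to (-0.3, 0.3) by tanh * 0.3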
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 0.3
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_native_layer_norm_relu_threshold_backward_1[grid(256)
](buf0, buf1, buf2, primals_4, primals_5, buf3, buf11, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = buf2
del buf2
buf6 = buf1
del buf1
triton_poi_fused_native_layer_norm_0[grid(64)](buf4, buf5, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_native_layer_norm_relu_threshold_backward_1[grid(256)
](buf4, buf5, buf6, primals_8, primals_9, buf7, buf10, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del buf6
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_11
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_2[grid(256)](buf8, buf9, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf9, primals_4, primals_8, reinterpret_tensor(primals_1, (64, 4
), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), buf8, primals_10, buf10, primals_6, buf11
class ActorNew(nn.Module):
def __init__(self, hidden_size, num_inputs, action_space):
super(ActorNew, self).__init__()
self.action_space = action_space
num_outputs = action_space.shape[0]
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.mu = nn.Linear(hidden_size, num_outputs)
self.mu.weight.data.mul_(0.01)
self.mu.bias.data.mul_(0.01)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.ln1.weight
primals_5 = self.ln1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_8 = self.ln2.weight
primals_9 = self.ln2.bias
primals_10 = self.mu.weight
primals_11 = self.mu.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
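# A minimal usage sketch (illustrative; assumes CUDA, matching the guards in
# call(); the fused forward computes the same linear/layernorm/relu/tanh
# pipeline as the eager Actor above):
#
#   actor = ActorNew(hidden_size=4, num_inputs=4,
#                    action_space=torch.rand(4, 4)).cuda()
#   mu = actor(torch.rand(4, 4, 4, 4, device='cuda'))   # shape (4, 4, 4, 4)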
|
JieFeng-cse/power-system-rl
|
Actor
| false | 9,191 |
[
"MIT"
] | 0 |
8295d14da83a40c755b8e6a14785c53a238f9a64
|
https://github.com/JieFeng-cse/power-system-rl/tree/8295d14da83a40c755b8e6a14785c53a238f9a64
|
KL_Loss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/l3/cl3mqwaki56dc4zcxfjjgkbopnejxzhksqm6egdinynmjrsrw2qw.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pf/cpfkvifhrhobwuxls65xhwdpkryeblqmmtghouii4lp3rhe3crx4.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/d5/cd5jvj2xtf5vbrydrjr7gjura3e2lbc3fjgbn4anltegd6a6wh3n.py
# Topologically Sorted Source Nodes: [softmax, teacher_outputs, kl_div, output_batch, loss], Original ATen: [aten._softmax, aten.add, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div]
# Source node to ATen node mapping:
# kl_div => div_3, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1
# loss => mul_2
# output_batch => exp, log, sub_1, sum_1
# softmax => div_2, sum_2
# teacher_outputs => add
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %add : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, 1e-07), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%add,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %log_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %sub_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, 1), kwargs = {})
triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2 = async_compile.triton('triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (r3), None)
tmp20 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = libdevice.isnan(tmp10).to(tl.int1)
tmp12 = 0.0
tmp13 = tmp10 == tmp12
tmp14 = tl_math.log(tmp10)
tmp15 = tmp10 * tmp14
tmp16 = tl.where(tmp13, tmp12, tmp15)
tmp17 = float("nan")
tmp18 = tl.where(tmp11, tmp17, tmp16)
tmp21 = tl_math.exp(tmp20)
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tl_math.log(tmp30)
tmp32 = tmp19 - tmp31
tmp33 = tmp10 * tmp32
tmp34 = tmp18 - tmp33
tmp35 = tl.broadcast_to(tmp34, [RBLOCK])
tmp37 = triton_helpers.promote_to_tensor(tl.sum(tmp35, 0))
tmp38 = 0.25
tmp39 = tmp37 * tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp41, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax, teacher_outputs, kl_div, output_batch, loss], Original ATen: [aten._softmax, aten.add, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div]
triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils
class KL_Loss(nn.Module):
def __init__(self, temperature=1):
super(KL_Loss, self).__init__()
self.T = temperature
def forward(self, output_batch, teacher_outputs):
output_batch = F.log_softmax(output_batch / self.T, dim=1)
teacher_outputs = F.softmax(teacher_outputs / self.T, dim=1) + 10 ** -7
loss = self.T * self.T * nn.KLDivLoss(reduction='batchmean')(
output_batch, teacher_outputs)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
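# The forward above computes, for any temperature T,
#   T**2 * KLDivLoss(reduction='batchmean')(log_softmax(student / T),
#                                           softmax(teacher / T) + 1e-7)
# where 'batchmean' divides the elementwise sum by the leading dim (4 here,
# matching the 0.25 factor in the fused kernel). A minimal usage sketch
# (illustrative):
#
#   crit = KL_Loss(temperature=1)
#   student, teacher = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
#   loss = crit(student, teacher)   # scalar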
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + r3, None)
tmp20 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = libdevice.isnan(tmp10).to(tl.int1)
tmp12 = 0.0
tmp13 = tmp10 == tmp12
tmp14 = tl_math.log(tmp10)
tmp15 = tmp10 * tmp14
tmp16 = tl.where(tmp13, tmp12, tmp15)
tmp17 = float('nan')
tmp18 = tl.where(tmp11, tmp17, tmp16)
tmp21 = tl_math.exp(tmp20)
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tl_math.log(tmp30)
tmp32 = tmp19 - tmp31
tmp33 = tmp10 * tmp32
tmp34 = tmp18 - tmp33
tmp35 = tl.broadcast_to(tmp34, [RBLOCK])
tmp37 = triton_helpers.promote_to_tensor(tl.sum(tmp35, 0))
tmp38 = 0.25
tmp39 = tmp37 * tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp41, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2[grid
(1)](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class KL_LossNew(nn.Module):
def __init__(self, temperature=1):
super(KL_LossNew, self).__init__()
self.T = temperature
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
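# A minimal usage sketch (illustrative; assumes CUDA, since call() pins
# cuda:0 and asserts (4, 4, 4, 4) inputs):
#
#   crit = KL_LossNew(temperature=1)
#   student = torch.rand(4, 4, 4, 4, device='cuda')
#   teacher = torch.rand(4, 4, 4, 4, device='cuda')
#   loss = crit(student, teacher)   # scalar tensor on cuda:0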
|
BlakeDai/FedML-test
|
KL_Loss
| false | 9,192 |
[
"Apache-2.0"
] | 0 |
3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
https://github.com/BlakeDai/FedML-test/tree/3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
Critic
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [sa], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# sa => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/y2/cy2lwgz7dq2q2z4ifepdde4l7vyyvrwcx4zjn2ezmtzcanvhv374.py
# Topologically Sorted Source Nodes: [q1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# q1 => relu
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (256, 8), (8, 1))
assert_size_stride(primals_4, (256, ), (1, ))
assert_size_stride(primals_5, (256, 256), (256, 1))
assert_size_stride(primals_6, (256, ), (1, ))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1, ), (1, ))
assert_size_stride(primals_9, (256, 8), (8, 1))
assert_size_stride(primals_10, (256, ), (1, ))
assert_size_stride(primals_11, (256, 256), (256, 1))
assert_size_stride(primals_12, (256, ), (1, ))
assert_size_stride(primals_13, (1, 256), (256, 1))
assert_size_stride(primals_14, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [sa], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [q1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 1024, grid=grid(1024), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), (1, 256), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [q1_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf4, primals_6, 1024, grid=grid(1024), stream=stream0)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [q1_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 256), (1, 8), 0), out=buf7)
del primals_9
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [q2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf8, primals_10, 1024, grid=grid(1024), stream=stream0)
del primals_10
buf9 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (256, 256), (1, 256), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [q2_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf10, primals_12, 1024, grid=grid(1024), stream=stream0)
del primals_12
buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [q2_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(primals_13, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
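# Editorial sketch (not from the original repo; the helper name is ours):
# minimal usage of the Critic above with the shapes implied by get_inputs()
# and get_init_inputs(). In TD3 the two heads give independent Q estimates,
# and Q1 alone typically drives the actor update.
def example_usage():
    critic = Critic(state_dim=4, action_dim=4)
    state, action = torch.rand(4, 4), torch.rand(4, 4)
    q1, q2 = critic(state, action)  # each of shape (4, 1)
    q1_only = critic.Q1(state, action)  # first head alone
    return q1, q2, q1_only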
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
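# Note (editorial): the kernel above realizes torch.cat([state, action], dim=1)
# for two (4, 4) inputs. Each of the 32 output elements picks its source with
# tl.where: columns 0-3 come from in_ptr0 (state), columns 4-7 from in_ptr1
# (action), producing the (4, 8) buffer buf0 consumed by both critic towers.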
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
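# Note (editorial): triton_poi_fused_relu_1 fuses the Linear bias add with the
# ReLU in place, computing x = max(0, x + bias) over the (4, 256) mm output;
# it is reused for all four hidden layers (l1, l2, l4, l5) in call() below.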
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (256, 8), (8, 1))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (256, 256), (256, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (256, 8), (8, 1))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256, 256), (256, 1))
assert_size_stride(primals_12, (256,), (1,))
assert_size_stride(primals_13, (1, 256), (256, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
        triton_poi_fused_relu_1[grid(1024)](buf2, primals_4, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), (
1, 256), 0), out=buf3)
buf4 = buf3
del buf3
        triton_poi_fused_relu_1[grid(1024)](buf4, primals_6, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 256), (1,
8), 0), out=buf7)
del primals_9
buf8 = buf7
del buf7
        triton_poi_fused_relu_1[grid(1024)](buf8, primals_10, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf9 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (256, 256),
(1, 256), 0), out=buf9)
buf10 = buf9
del buf9
        triton_poi_fused_relu_1[grid(1024)](buf10, primals_12, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
primals_13, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
primals_11, primals_7, primals_5)
class CriticNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(CriticNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.l2.weight
primals_6 = self.l2.bias
primals_7 = self.l3.weight
primals_8 = self.l3.bias
primals_9 = self.l4.weight
primals_10 = self.l4.bias
primals_11 = self.l5.weight
primals_12 = self.l5.bias
primals_13 = self.l6.weight
primals_14 = self.l6.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
|
Kelym/TD3
|
Critic
| false | 9,194 |
[
"MIT"
] | 0 |
ea565c9d6f74aeb47b096538274cbd5ffc657de5
|
https://github.com/Kelym/TD3/tree/ea565c9d6f74aeb47b096538274cbd5ffc657de5
|
Conv2dDynamicSamePadding
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 2, 1, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
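# Note (editorial): in the return above, buf2 is the convolution output, while
# primals_2 (the weight) and buf0 (the padded input) are carried along because
# this is the AOT '0_forward' graph and both are saved for the backward pass.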
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils
class Conv2dDynamicSamePadding(nn.Conv2d):
"""2D Convolutions like TensorFlow, for a dynamic image size.
    The padding amount is computed dynamically in the forward pass.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] +
1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] +
1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
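# Worked example (editorial): for the 4x4 input above with kernel_size=4,
# stride=1, dilation=1, the 'SAME' formula gives oh = ceil(4 / 1) = 4 and
# pad_h = pad_w = max((4 - 1) * 1 + (4 - 1) * 1 + 1 - 4, 0) = 3, split as
# F.pad(x, [1, 2, 1, 2]) -- exactly the constant_pad_nd([1, 2, 1, 2]) the
# compiled graph applies to build the (4, 4, 7, 7) buffer before the conv.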
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class Conv2dDynamicSamePaddingNew(nn.Conv2d):
"""2D Convolutions like TensorFlow, for a dynamic image size.
    The padding amount is computed dynamically in the forward pass.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.weight
        primals_3 = self.bias
output = call([primals_1, primals_2, primals_3])
return output[0]
|
BlakeDai/FedML-test
|
Conv2dDynamicSamePadding
| false | 9,196 |
[
"Apache-2.0"
] | 0 |
3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
https://github.com/BlakeDai/FedML-test/tree/3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
LogisticRegression
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/jq/cjqaq2meov3vkcgfealq7w4w35tw2oemvmhneuxmigeoumva22p7.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# outputs => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils
class LogisticRegression(torch.nn.Module):
def __init__(self, input_dim, output_dim):
super(LogisticRegression, self).__init__()
self.linear = torch.nn.Linear(input_dim, output_dim)
def forward(self, x):
outputs = torch.sigmoid(self.linear(x))
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
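# Editorial sketch (helper name is ours): the module above is just
# sigmoid(x @ W.T + b); the compiled version below folds the bias add and the
# sigmoid into a single elementwise Triton kernel after the mm.
def check_logistic_equivalence():
    model = LogisticRegression(input_dim=4, output_dim=4)
    x = torch.rand(4, 4, 4, 4)
    ref = torch.sigmoid(x @ model.linear.weight.T + model.linear.bias)
    assert torch.allclose(model(x), ref, atol=1e-6)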
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
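# Note (editorial): buf1 appears twice in the return above because it is both
# the forward output and a tensor saved for backward -- sigmoid's gradient is
# computed from its own output -- while the reshaped primals_3 is kept for the
# mm backward.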
class LogisticRegressionNew(torch.nn.Module):
def __init__(self, input_dim, output_dim):
super(LogisticRegressionNew, self).__init__()
self.linear = torch.nn.Linear(input_dim, output_dim)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
BlakeDai/FedML-test
|
LogisticRegression
| false | 9,197 |
[
"Apache-2.0"
] | 0 |
3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
https://github.com/BlakeDai/FedML-test/tree/3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
MaxPool2dDynamicSamePadding
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/qq/cqqb7lg6q4ad6ao65faglzxqzvt3ad3sdvodh7k57sce43e7szvx.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp8 & tmp13
tmp16 = tmp15 & tmp14
tmp17 = tl.load(in_ptr0 + ((-4) + x4), tmp16 & xmask, other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp8 & tmp20
tmp23 = tmp22 & tmp21
tmp24 = tl.load(in_ptr0 + ((-3) + x4), tmp23 & xmask, other=0.0)
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp8 & tmp27
tmp30 = tmp29 & tmp28
tmp31 = tl.load(in_ptr0 + ((-2) + x4), tmp30 & xmask, other=0.0)
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp6
tmp38 = tmp37 & tmp7
tmp39 = tl.load(in_ptr0 + ((-1) + x4), tmp38 & xmask, other=0.0)
tmp40 = triton_helpers.maximum(tmp39, tmp32)
tmp41 = tmp36 & tmp13
tmp42 = tmp41 & tmp14
tmp43 = tl.load(in_ptr0 + (x4), tmp42 & xmask, other=0.0)
tmp44 = triton_helpers.maximum(tmp43, tmp40)
tmp45 = tmp36 & tmp20
tmp46 = tmp45 & tmp21
tmp47 = tl.load(in_ptr0 + (1 + x4), tmp46 & xmask, other=0.0)
tmp48 = triton_helpers.maximum(tmp47, tmp44)
tmp49 = tmp36 & tmp27
tmp50 = tmp49 & tmp28
tmp51 = tl.load(in_ptr0 + (2 + x4), tmp50 & xmask, other=0.0)
tmp52 = triton_helpers.maximum(tmp51, tmp48)
tmp53 = 1 + x1
tmp54 = tmp53 >= tmp1
tmp55 = tmp53 < tmp3
tmp56 = tmp54 & tmp55
tmp57 = tmp56 & tmp6
tmp58 = tmp57 & tmp7
tmp59 = tl.load(in_ptr0 + (3 + x4), tmp58 & xmask, other=0.0)
tmp60 = triton_helpers.maximum(tmp59, tmp52)
tmp61 = tmp56 & tmp13
tmp62 = tmp61 & tmp14
tmp63 = tl.load(in_ptr0 + (4 + x4), tmp62 & xmask, other=0.0)
tmp64 = triton_helpers.maximum(tmp63, tmp60)
tmp65 = tmp56 & tmp20
tmp66 = tmp65 & tmp21
tmp67 = tl.load(in_ptr0 + (5 + x4), tmp66 & xmask, other=0.0)
tmp68 = triton_helpers.maximum(tmp67, tmp64)
tmp69 = tmp56 & tmp27
tmp70 = tmp69 & tmp28
tmp71 = tl.load(in_ptr0 + (6 + x4), tmp70 & xmask, other=0.0)
tmp72 = triton_helpers.maximum(tmp71, tmp68)
tmp73 = 2 + x1
tmp74 = tmp73 >= tmp1
tmp75 = tmp73 < tmp3
tmp76 = tmp74 & tmp75
tmp77 = tmp76 & tmp6
tmp78 = tmp77 & tmp7
tmp79 = tl.load(in_ptr0 + (7 + x4), tmp78 & xmask, other=0.0)
tmp80 = triton_helpers.maximum(tmp79, tmp72)
tmp81 = tmp76 & tmp13
tmp82 = tmp81 & tmp14
tmp83 = tl.load(in_ptr0 + (8 + x4), tmp82 & xmask, other=0.0)
tmp84 = triton_helpers.maximum(tmp83, tmp80)
tmp85 = tmp76 & tmp20
tmp86 = tmp85 & tmp21
tmp87 = tl.load(in_ptr0 + (9 + x4), tmp86 & xmask, other=0.0)
tmp88 = triton_helpers.maximum(tmp87, tmp84)
tmp89 = tmp76 & tmp27
tmp90 = tmp89 & tmp28
tmp91 = tl.load(in_ptr0 + (10 + x4), tmp90 & xmask, other=0.0)
tmp92 = triton_helpers.maximum(tmp91, tmp88)
tl.store(out_ptr0 + (x4), tmp92, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
"""2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.
    The padding amount is computed dynamically in the forward pass.
"""
def __init__(self, kernel_size, stride, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
super().__init__(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
        self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.kernel_size
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] +
1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] +
1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2])
return F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
self.dilation, self.ceil_mode, self.return_indices)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4, 'stride': 1}]
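# Worked example (editorial): with a 4x4 input, kernel_size=4 and stride=1,
# the same formula as the conv variant yields pad_h = pad_w = 3, i.e.
# F.pad(x, [1, 2, 1, 2]) with zeros; the 4x4 window then slides at stride 1
# over the 7x7 padded map, so the output stays (4, 4, 4, 4), matching buf0.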
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp8 & tmp13
tmp16 = tmp15 & tmp14
tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp8 & tmp20
tmp23 = tmp22 & tmp21
tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=0.0)
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp8 & tmp27
tmp30 = tmp29 & tmp28
tmp31 = tl.load(in_ptr0 + (-2 + x4), tmp30 & xmask, other=0.0)
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp6
tmp38 = tmp37 & tmp7
tmp39 = tl.load(in_ptr0 + (-1 + x4), tmp38 & xmask, other=0.0)
tmp40 = triton_helpers.maximum(tmp39, tmp32)
tmp41 = tmp36 & tmp13
tmp42 = tmp41 & tmp14
tmp43 = tl.load(in_ptr0 + x4, tmp42 & xmask, other=0.0)
tmp44 = triton_helpers.maximum(tmp43, tmp40)
tmp45 = tmp36 & tmp20
tmp46 = tmp45 & tmp21
tmp47 = tl.load(in_ptr0 + (1 + x4), tmp46 & xmask, other=0.0)
tmp48 = triton_helpers.maximum(tmp47, tmp44)
tmp49 = tmp36 & tmp27
tmp50 = tmp49 & tmp28
tmp51 = tl.load(in_ptr0 + (2 + x4), tmp50 & xmask, other=0.0)
tmp52 = triton_helpers.maximum(tmp51, tmp48)
tmp53 = 1 + x1
tmp54 = tmp53 >= tmp1
tmp55 = tmp53 < tmp3
tmp56 = tmp54 & tmp55
tmp57 = tmp56 & tmp6
tmp58 = tmp57 & tmp7
tmp59 = tl.load(in_ptr0 + (3 + x4), tmp58 & xmask, other=0.0)
tmp60 = triton_helpers.maximum(tmp59, tmp52)
tmp61 = tmp56 & tmp13
tmp62 = tmp61 & tmp14
tmp63 = tl.load(in_ptr0 + (4 + x4), tmp62 & xmask, other=0.0)
tmp64 = triton_helpers.maximum(tmp63, tmp60)
tmp65 = tmp56 & tmp20
tmp66 = tmp65 & tmp21
tmp67 = tl.load(in_ptr0 + (5 + x4), tmp66 & xmask, other=0.0)
tmp68 = triton_helpers.maximum(tmp67, tmp64)
tmp69 = tmp56 & tmp27
tmp70 = tmp69 & tmp28
tmp71 = tl.load(in_ptr0 + (6 + x4), tmp70 & xmask, other=0.0)
tmp72 = triton_helpers.maximum(tmp71, tmp68)
tmp73 = 2 + x1
tmp74 = tmp73 >= tmp1
tmp75 = tmp73 < tmp3
tmp76 = tmp74 & tmp75
tmp77 = tmp76 & tmp6
tmp78 = tmp77 & tmp7
tmp79 = tl.load(in_ptr0 + (7 + x4), tmp78 & xmask, other=0.0)
tmp80 = triton_helpers.maximum(tmp79, tmp72)
tmp81 = tmp76 & tmp13
tmp82 = tmp81 & tmp14
tmp83 = tl.load(in_ptr0 + (8 + x4), tmp82 & xmask, other=0.0)
tmp84 = triton_helpers.maximum(tmp83, tmp80)
tmp85 = tmp76 & tmp20
tmp86 = tmp85 & tmp21
tmp87 = tl.load(in_ptr0 + (9 + x4), tmp86 & xmask, other=0.0)
tmp88 = triton_helpers.maximum(tmp87, tmp84)
tmp89 = tmp76 & tmp27
tmp90 = tmp89 & tmp28
tmp91 = tl.load(in_ptr0 + (10 + x4), tmp90 & xmask, other=0.0)
tmp92 = triton_helpers.maximum(tmp91, tmp88)
tl.store(out_ptr0 + x4, tmp92, xmask)
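# Note (editorial): the 16 masked loads above are the full 4x4 pooling window,
# offsets -1..+2 around each output pixel in both dimensions; out-of-range taps
# use other=0.0, which matches the module's explicit zero F.pad rather than the
# -inf padding max_pool2d would use internally.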
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MaxPool2dDynamicSamePaddingNew(nn.MaxPool2d):
"""2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.
    The padding amount is computed dynamically in the forward pass.
"""
def __init__(self, kernel_size, stride, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
super().__init__(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
        self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
BlakeDai/FedML-test
|
MaxPool2dDynamicSamePadding
| false | 9,198 |
[
"Apache-2.0"
] | 0 |
3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
https://github.com/BlakeDai/FedML-test/tree/3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
Swish
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6b/c6bpxckx47becppkk5ixcba2hybdk775hnag55qn2o7x3tn3gaks.py
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.utils
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
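# Editorial note (assumes torch >= 1.7, where F.silu exists; helper name is
# ours): Swish above is x * torch.sigmoid(x), the same function PyTorch ships
# as SiLU.
def check_swish_is_silu():
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(Swish()(x), torch.nn.functional.silu(x))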
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SwishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
BlakeDai/FedML-test
|
Swish
| false | 9,199 |
[
"Apache-2.0"
] | 0 |
3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
https://github.com/BlakeDai/FedML-test/tree/3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
MemoryEfficientSwish
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6b/c6bpxckx47becppkk5ixcba2hybdk775hnag55qn2o7x3tn3gaks.py
# Topologically Sorted Source Nodes: [sigmoid, result], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# result => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, result], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.utils
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
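# Editorial sketch (helper name is ours): the hand-written backward above is
# the analytic derivative of i * sigmoid(i), namely
# sigmoid(i) * (1 + i * (1 - sigmoid(i))). gradcheck can confirm it
# numerically; double precision is required for gradcheck's tolerances.
def gradcheck_swish():
    i = torch.randn(8, dtype=torch.double, requires_grad=True)
    assert torch.autograd.gradcheck(SwishImplementation.apply, (i,))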
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
BlakeDai/FedML-test
|
MemoryEfficientSwish
| false | 9,200 |
[
"Apache-2.0"
] | 0 |
3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
https://github.com/BlakeDai/FedML-test/tree/3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
MultiHeadAttn
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/fq/cfqwyletxlxsztzvms4ugcujphw6sshjg2bm6qtmkz3kg7omhsdb.py
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_score => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (32*y1) + (128*x2)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/q2/cq2bhrixv5xavijmo4bsnrvtws5kwvqhdummdlzij7fvpl7fcfb5.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => amax, clone_1, exp, sub
# Graph fragment:
# %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kl/ckl6u3t54hef2b5wjd5dpgg7u4q64ygtqnuha2usouwtpaivlz52.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + ((4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7t/c7tefgs6fnwuoixspsoxpumnyhdslyt74yuzsspadja5iim4s57k.py
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_vec => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_14,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + (4*x2) + (32*x3) + (128*x1)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/r2/cr2qmbufonkcpvzj5nto7mm2yb255fxx7ca2wtbl5deiamneh6dk.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_21,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_24), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/iz/cizh7p23zwsiqbrt6dvrlvjzpyujwvyyaolptfk5xtby6foymiaz.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_24), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_6), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [head_q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, buf2, 64, 4, grid=grid(64, 4), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32)
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 16, 16, grid=grid(16, 16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf1, buf6, 256, grid=grid(256), stream=stream0)
del buf1
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf10, buf11, primals_5, primals_6, buf12, 64, grid=grid(64), stream=stream0)
del buf10
del buf11
del primals_6
return (buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (4, 1, 64), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
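The _softmax kernel pair above is the usual two-pass, numerically stable softmax over dim 1: the first kernel scales the raw scores by 0.5 (the module's 1 / sqrt(d_head) for d_head=4), subtracts the per-row max, and exponentiates; the second divides by the row sum. A minimal eager-mode sketch of the same computation:
import torch
scores = torch.rand(4, 4, 4, 4)
scaled = scores * 0.5                                # scale = 1 / d_head ** 0.5
shifted = scaled - scaled.amax(dim=1, keepdim=True)  # pass 1: subtract row max
expd = shifted.exp()
prob = expd / expd.sum(dim=1, keepdim=True)          # pass 2: normalize
assert torch.allclose(prob, torch.softmax(scaled, dim=1))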
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, h, attn_mask=None, mems=None):
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None, :, :, None],
                    -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:, :, :, None],
                    -float('inf'))
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
        attn_vec = attn_vec.contiguous().view(attn_vec.size(0),
            attn_vec.size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = h + attn_out
else:
output = self.layer_norm(h + attn_out)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
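A minimal smoke test of the module above, using the shapes from get_inputs()/get_init_inputs(); eval() disables both dropout layers so the run is deterministic:
import torch
attn = MultiHeadAttn(n_head=4, d_model=4, d_head=4, dropout=0.5)
attn.eval()              # make drop/dropatt identity
h = torch.rand(4, 4, 4)  # (seq_len, batch, d_model)
out = attn(h)
print(out.shape)         # torch.Size([4, 4, 4])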
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 32 * y1 + 128 * x2), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask,
        eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 32 * x3 + 128 * x1), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
        buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_2[grid(16, 16)](buf4, buf5, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(256)](buf1, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf9,
buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_6
    return (buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8,
        (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6,
        (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4),
        (4, 1, 64), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0))
class MultiHeadAttnNew(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttnNew, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, input_0):
primals_2 = self.q_net.weight
primals_3 = self.kv_net.weight
primals_4 = self.o_net.weight
primals_5 = self.layer_norm.weight
primals_6 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
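A sketch of a parity check between the eager module and this compiled wrapper, assuming a CUDA device is available (the compiled call() is CUDA-only); with shared weights and eval mode the two outputs should agree to floating-point tolerance:
import torch
ref = MultiHeadAttn(4, 4, 4, dropout=0.5).cuda().eval()
new = MultiHeadAttnNew(4, 4, 4, dropout=0.5).cuda().eval()
new.load_state_dict(ref.state_dict())  # use identical weights
h = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(ref(h), new(h), atol=1e-5)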
|
JingzhaoZhang/transformerxl-noise
|
MultiHeadAttn
| false | 9,201 |
[
"Apache-2.0"
] | 0 |
83b91c505217da2a32b6ca592e01b4a1e941937b
|
https://github.com/JingzhaoZhang/transformerxl-noise/tree/83b91c505217da2a32b6ca592e01b4a1e941937b
|
ZeroPad1d
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zg/czgid72b6fkrabkbzh6dovarzeothmiqpqb7azoiz6xeuyfhgius.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [4, 4], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = (-4) + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 768, grid=grid(768), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
import torch.distributed
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pad_left': 4, 'pad_right': 4}]
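F.pad with a 2-tuple pads only the last dimension: pad_left zeros in front, pad_right zeros behind, which is exactly what the constant_pad_nd kernel above materializes (4 + 4 + 4 = 12 along the last axis here). A tiny worked example:
import torch
import torch.nn.functional as F
x = torch.arange(1., 4.).view(1, 1, 3)
print(F.pad(x, (2, 1)))
# tensor([[[0., 0., 1., 2., 3., 0.]]])  <- 2 zeros left, 1 zero right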
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = -4 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(768)](arg0_1, buf0, 768,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ZeroPad1dNew(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
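A quick shape-and-content check of the compiled wrapper, assuming a CUDA device; the input must be exactly (4, 4, 4, 4) because call() asserts that size, and the output mirrors the (4, 4, 4, 12) buffer it allocates:
import torch
pad = ZeroPad1dNew(pad_left=4, pad_right=4)
x = torch.rand(4, 4, 4, 4, device='cuda')
y = pad(x)
print(y.shape)  # torch.Size([4, 4, 4, 12])
assert (y[..., :4] == 0).all() and (y[..., -4:] == 0).all()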
|
DCMMC/chineseocr
|
ZeroPad1d
| false | 9,202 |
[
"MIT"
] | 0 |
0b8772615239ea7f212b1ab5bc75183e7e9f16b0
|
https://github.com/DCMMC/chineseocr/tree/0b8772615239ea7f212b1ab5bc75183e7e9f16b0
|
MiCrossEntropyLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/za/cza3krejiqg7tjfagjknvrkfb4hawyqjlraihwndrbdsaurpzx3y.py
# Topologically Sorted Source Nodes: [cross_entropy, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mean]
# Source node to ATen node mapping:
# cross_entropy => div, exp, log, mul, neg, sub_1, sum_1, sum_2
# mean => mean
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused__log_softmax_div_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mean_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = 1.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [cross_entropy, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mean]
triton_per_fused__log_softmax_div_mean_mul_neg_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
class MiCrossEntropyLoss(torch.nn.Module):
def __init__(self):
super(MiCrossEntropyLoss, self).__init__()
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, mi_cls_output, label, **_):
return self.ce_loss(mi_cls_output, label).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
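Because the targets here are dense probabilities rather than class indices, CrossEntropyLoss takes its soft-label path: -(target * log_softmax(input, dim=1)) summed over the class dim and averaged over the remaining 4*4*4 = 64 positions, which is the 0.015625 = 1/64 constant baked into the fused kernel (the trailing .mean() in forward is a no-op on the already-reduced scalar). A sketch of that equivalence (probability targets require PyTorch >= 1.10):
import torch
import torch.nn.functional as F
inp = torch.rand(4, 4, 4, 4)
tgt = torch.rand(4, 4, 4, 4)
manual = -(tgt * F.log_softmax(inp, dim=1)).sum(dim=1).mean()
ref = torch.nn.CrossEntropyLoss()(inp, tgt)  # soft-label path
assert torch.allclose(manual, ref)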
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mean_mul_neg_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None,
        eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = 1.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mean_mul_neg_sum_1[grid(1)](buf2,
buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class MiCrossEntropyLossNew(torch.nn.Module):
def __init__(self):
super(MiCrossEntropyLossNew, self).__init__()
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
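# Note on argument order (inferred from the traced graph, so treat it as an
# assumption): call() runs the log-softmax kernel on its *second* argument,
# meaning input_1 plays the role of mi_cls_output (logits) and input_0 the
# role of label from the eager module above.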
|
Jinoh-Cho/Visual-Genome-Image-Inpainting
|
MiCrossEntropyLoss
| false | 9,203 |
[
"MIT"
] | 0 |
f8c43bf2e4a9139d4c35903d0c323b9d8eb54859
|
https://github.com/Jinoh-Cho/Visual-Genome-Image-Inpainting/tree/f8c43bf2e4a9139d4c35903d0c323b9d8eb54859
|
Model
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/sa/csap5cthikzuwdmqu2ehow73ilvezq4ym46wfjbhn3cebigfe7xy.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.softplus]
# Source node to ATen node mapping:
# x_1 => div, exp, gt, log1p, mul, where
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %div), kwargs = {})
triton_poi_fused_softplus_0 = async_compile.triton('triton_poi_fused_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
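    # Stable Softplus branch: with beta=1.0 and threshold=20.0, inputs above
    # the threshold are passed through linearly instead of computing
    # log1p(exp(x)), avoiding overflow in exp().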
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.softplus]
triton_poi_fused_softplus_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
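        # Everything after buf4 in the return tuple (the reshaped input,
        # intermediate activation buffers, and the fc2/fc3 weights) is saved
        # for the backward pass; this is a '0_forward' AOT graph of a
        # trainable module.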
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from torch import nn
class Model(nn.Module):
def __init__(self, input_n: 'int', output_n: 'int', hidden_n: 'int'
) ->None:
super().__init__()
        self.input_shape = (input_n,)
        self.output_shape = (output_n,)
self.hidden_n = hidden_n
self.acctivate = nn.Softplus()
self.fc1 = nn.Linear(input_n, self.hidden_n)
self.fc2 = nn.Linear(self.hidden_n, self.hidden_n)
self.fc3 = nn.Linear(self.hidden_n, output_n)
def forward(self, x: 'Tensor') ->Tensor:
x = self.fc1(x)
x = self.acctivate(x)
x = self.fc2(x)
x = self.acctivate(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_n': 4, 'output_n': 4, 'hidden_n': 4}]
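# The fused Triton kernel above (triton_poi_fused_softplus_0) reproduces
# PyTorch's numerically stable Softplus (beta=1, threshold=20). A minimal
# identity sketch, purely illustrative and not part of the original repo:
#   x = torch.tensor([-5.0, 0.0, 25.0])
#   stable = torch.where(x > 20.0, x, torch.log1p(torch.exp(x)))
#   assert torch.allclose(torch.nn.functional.softplus(x), stable)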
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_softplus_0[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), primals_6, primals_4
class ModelNew(nn.Module):
def __init__(self, input_n: 'int', output_n: 'int', hidden_n: 'int'
) ->None:
super().__init__()
        self.input_shape = (input_n,)
        self.output_shape = (output_n,)
self.hidden_n = hidden_n
self.acctivate = nn.Softplus()
self.fc1 = nn.Linear(input_n, self.hidden_n)
self.fc2 = nn.Linear(self.hidden_n, self.hidden_n)
self.fc3 = nn.Linear(self.hidden_n, output_n)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Kkun84/DifferentialEquation
|
Model
| false | 9,204 |
[
"MIT"
] | 0 |
9da2681366363f15512f09a6aa1c640c56a0a754
|
https://github.com/Kkun84/DifferentialEquation/tree/9da2681366363f15512f09a6aa1c640c56a0a754
|
CE_Loss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/jh/cjhafiazvhnqzahvgpeyzzxgeb5atp7ebiv4plitnblial63qxb6.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1), kwargs = {})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/k4/ck4jcdo7pholu6j7hyu5a5wtavlli6c52bnxxxqwlta7ah2dps4s.py
# Topologically Sorted Source Nodes: [teacher_outputs], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# teacher_outputs => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mn/cmnlidmjpz2yihyn6sanrop2yyzhcy2ynecgwnndb5sydmmmfwas.py
# Topologically Sorted Source Nodes: [output_batch, teacher_outputs, mul, sum_1, mul_1, loss], Original ATen: [aten._log_softmax, aten._softmax, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# loss => div_3
# mul => mul
# mul_1 => mul_1
# output_batch => exp, log, sub_1, sum_1
# sum_1 => sum_3
# teacher_outputs => div_2, sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %div_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, -1), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 4), kwargs = {})
triton_per_fused__log_softmax__softmax_div_mul_sum_2 = async_compile.triton('triton_per_fused__log_softmax__softmax_div_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_div_mul_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp15 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 / tmp21
tmp23 = tmp13 * tmp22
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = -1.0
tmp28 = tmp26 * tmp27
tmp29 = 0.25
tmp30 = tmp28 * tmp29
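    # The two constants fold the eager scaling: -1.0 corresponds to -T * T
    # with the default temperature T=1, and 0.25 is
    # 1 / teacher_outputs.size(0) for the fixed batch of 4.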
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [teacher_outputs], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [output_batch, teacher_outputs, mul, sum_1, mul_1, loss], Original ATen: [aten._log_softmax, aten._softmax, aten.mul, aten.sum, aten.div]
triton_per_fused__log_softmax__softmax_div_mul_sum_2.run(buf4, buf0, buf1, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils
class CE_Loss(nn.Module):
def __init__(self, temperature=1):
super(CE_Loss, self).__init__()
self.T = temperature
def forward(self, output_batch, teacher_outputs):
output_batch = F.log_softmax(output_batch / self.T, dim=1)
teacher_outputs = F.softmax(teacher_outputs / self.T, dim=1)
loss = -self.T * self.T * torch.sum(torch.mul(output_batch,
teacher_outputs)) / teacher_outputs.size(0)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
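# Closed form of the distillation loss above, for reference: with student
# logits s, teacher logits t, temperature T and batch size N,
#   loss = -(T * T / N) * sum(log_softmax(s / T) * softmax(t / T))
# A minimal usage sketch (illustrative only):
#   student, teacher = get_inputs()
#   loss = CE_Loss(temperature=2)(student, teacher)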
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sum_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp15 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 / tmp21
tmp23 = tmp13 * tmp22
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = -1.0
tmp28 = tmp26 * tmp27
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](arg1_1, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_div_mul_sum_2[grid(1)](buf4,
buf0, buf1, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf1
return buf4,
class CE_LossNew(nn.Module):
def __init__(self, temperature=1):
super(CE_LossNew, self).__init__()
self.T = temperature
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
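# Parity sketch between the eager and compiled losses (an assumption-laden
# check, not from the repo: it reuses CE_Loss/get_inputs from above and needs
# a CUDA device, since call() pins every buffer to cuda:0):
#   student, teacher = [t.cuda() for t in get_inputs()]
#   assert torch.allclose(CE_Loss()(student, teacher),
#                         CE_LossNew()(student, teacher), atol=1e-6)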
|
BlakeDai/FedML-test
|
CE_Loss
| false | 9,205 |
[
"Apache-2.0"
] | 0 |
3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
https://github.com/BlakeDai/FedML-test/tree/3cb9a7234f3f0294f3137e4be572153ba7b62f8f
|
ScaleNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5i/c5iachkxsftg5m5zniolyz4aeojcm42muwsrb2a34ygecaxk5f47.py
# Topologically Sorted Source Nodes: [norm, clamp, norm_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
# Source node to ATen node mapping:
# clamp => clamp_min
# norm => pow_1, pow_2, sum_1
# norm_1 => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = 1e-05
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp1 / tmp15
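    # tmp16 is scale / max(||row||_2, eps) for one length-4 row along the
    # last dimension; the second kernel broadcasts it back over that
    # dimension.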
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pq/cpqdtx3g3pyjzwakdnj54l47sm3lyvtzkr3iagg7am3bkbp6ommo.py
# Topologically Sorted Source Nodes: [norm, clamp, norm_1, mul], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul]
# Source node to ATen node mapping:
# clamp => clamp_min
# mul => mul
# norm => pow_1, pow_2, sum_1
# norm_1 => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_poi_fused_clamp_div_linalg_vector_norm_mul_1 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [norm, clamp, norm_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0.run(primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, clamp, norm_1, mul], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul]
triton_poi_fused_clamp_div_linalg_vector_norm_mul_1.run(primals_2, buf0, buf1, 256, grid=grid(256), stream=stream0)
del buf0
return (buf1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.nn as nn
import torch.nn.parallel
class ScaleNorm(nn.Module):
"""Apply Scale Normalization to input.
    The ScaleNorm layer stores the square root of the scale as a learnable parameter and computes the L2 norm of the input tensor along its last dimension.
    The norm value is calculated as `sqrt(scale) / max(norm, eps)`.
    Finally, the result is returned as `input_tensor * norm value`.
This layer can be used instead of LayerNorm when a scaled version of the norm is required.
Instead of performing the scaling operation (`scale / norm`) in a lambda-like layer, we are defining it within this layer to make prototyping more efficient.
References
----------
.. [1] Lukasz Maziarka et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> from deepchem.models.torch_models.layers import ScaleNorm
>>> scale = 0.35
>>> layer = ScaleNorm(scale)
>>> input_tensor = torch.tensor([[1.269, 39.36], [0.00918, -9.12]])
>>> output_tensor = layer(input_tensor)
"""
def __init__(self, scale: 'float', eps: 'float'=1e-05):
"""Initialize a ScaleNorm layer.
Parameters
----------
scale: float
Scale magnitude.
eps: float
Epsilon value. Default = 1e-5.
"""
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
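# Equivalent closed form of the forward pass above:
#   out = x * p / max(||x||_2, eps)
# where p is the learnable parameter initialized to sqrt(scale) and the L2
# norm is taken over the last dimension.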
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = 1e-05
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp1 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(64)](primals_1,
primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_div_linalg_vector_norm_mul_1[grid(256)](
primals_2, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
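        # The raw input (primals_2) is returned alongside the result as a
        # saved tensor for the backward pass; the per-row norm buffer buf0
        # is not kept.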
return buf1, primals_2
class ScaleNormNew(nn.Module):
"""Apply Scale Normalization to input.
    The ScaleNorm layer stores the square root of the scale as a learnable parameter and computes the L2 norm of the input tensor along its last dimension.
    The norm value is calculated as `sqrt(scale) / max(norm, eps)`.
    Finally, the result is returned as `input_tensor * norm value`.
This layer can be used instead of LayerNorm when a scaled version of the norm is required.
Instead of performing the scaling operation (`scale / norm`) in a lambda-like layer, we are defining it within this layer to make prototyping more efficient.
References
----------
.. [1] Lukasz Maziarka et al. "Molecule Attention Transformer" Graph Representation Learning workshop and Machine Learning and the Physical Sciences workshop at NeurIPS 2019. 2020. https://arxiv.org/abs/2002.08264
Examples
--------
>>> from deepchem.models.torch_models.layers import ScaleNorm
>>> scale = 0.35
>>> layer = ScaleNorm(scale)
>>> input_tensor = torch.tensor([[1.269, 39.36], [0.00918, -9.12]])
>>> output_tensor = layer(input_tensor)
"""
def __init__(self, scale: 'float', eps: 'float'=1e-05):
"""Initialize a ScaleNorm layer.
Parameters
----------
scale: float
Scale magnitude.
eps: float
Epsilon value. Default = 1e-5.
"""
super(ScaleNormNew, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
JoseAntonioSiguenza/deepchem
|
ScaleNorm
| false | 9,206 |
[
"MIT"
] | 0 |
05fe1b186ec154e18de9aa1b110e9258dc484e21
|
https://github.com/JoseAntonioSiguenza/deepchem/tree/05fe1b186ec154e18de9aa1b110e9258dc484e21
|
UPChannelBAN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/eh/ceh2tavoj4zbzeixfhcp4k45qw7zbbme7m2weqdqilclzphzdslt.py
# Topologically Sorted Source Nodes: [cls_kernel, pk], Original ATen: [aten.convolution, aten.view]
# Source node to ATen node mapping:
# cls_kernel => convolution
# pk => view
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%convolution, [-1, 256, 62, 62]), kwargs = {})
triton_poi_fused_convolution_view_0 = async_compile.triton('triton_poi_fused_convolution_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7872512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x1 = (xindex // 3844) % 512
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/y6/cy6qx57jpiu4vlxl2g25y7rel2psvzfz7ebvhvgjv3bc3t32qrmr.py
# Topologically Sorted Source Nodes: [cls_feature], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# cls_feature => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_8, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3936256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3844) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xq/cxq3v2kja3zv4alli7z5uribrvjd3atbdmdoxvcj4tw3dddxqyxq.py
# Topologically Sorted Source Nodes: [loc_kernel, pk_1], Original ATen: [aten.convolution, aten.view]
# Source node to ATen node mapping:
# loc_kernel => convolution_1
# pk_1 => view_3
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %view_3 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%convolution_1, [-1, 256, 62, 62]), kwargs = {})
triton_poi_fused_convolution_view_2 = async_compile.triton('triton_poi_fused_convolution_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_view_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_view_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 15745024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x1 = (xindex // 3844) % 1024
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5v/c5vp5kwiougsjrt24sbuebc3tdgtaqyy6acw5g4woberjney646p.py
# Topologically Sorted Source Nodes: [loc], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# loc => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_5, %primals_11, %primals_12, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_2, (512, ), (1, ))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_4, (1024, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (1024, ), (1, ))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (256, ), (1, ))
assert_size_stride(primals_11, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [cls_kernel], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 512, 62, 62), (1968128, 3844, 62, 1))
# Topologically Sorted Source Nodes: [loc_kernel], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1024, 62, 62), (3936256, 3844, 62, 1))
# Topologically Sorted Source Nodes: [cls_feature], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_8, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 62, 62), (984064, 3844, 62, 1))
# Topologically Sorted Source Nodes: [loc_feature], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_8, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 62, 62), (984064, 3844, 62, 1))
buf4 = buf0; del buf0 # reuse
buf5 = reinterpret_tensor(buf4, (8, 256, 62, 62), (984064, 3844, 62, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [cls_kernel, pk], Original ATen: [aten.convolution, aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_view_0.run(buf5, primals_2, 7872512, grid=grid(7872512), stream=stream0)
del primals_2
buf6 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cls_feature], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf6, primals_7, 3936256, grid=grid(3936256), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [po], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 1024, 62, 62), (0, 3844, 62, 1), 0), buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf7, (1, 8, 1, 1), (8, 1, 1, 1))
buf8 = buf1; del buf1 # reuse
buf9 = reinterpret_tensor(buf8, (16, 256, 62, 62), (984064, 3844, 62, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [loc_kernel, pk_1], Original ATen: [aten.convolution, aten.view]
triton_poi_fused_convolution_view_2.run(buf9, primals_5, 15745024, grid=grid(15745024), stream=stream0)
del primals_5
buf10 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [loc_feature], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf10, primals_10, 3936256, grid=grid(3936256), stream=stream0)
del primals_10
# Topologically Sorted Source Nodes: [po_2], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (1, 1024, 62, 62), (0, 3844, 62, 1), 0), buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf11, (1, 16, 1, 1), (16, 1, 1, 1))
# Topologically Sorted Source Nodes: [loc], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4, 1, 1), (4, 1, 1, 1), 0), primals_11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 1, 1), (4, 1, 1, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [loc], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf13, primals_12, 16, grid=grid(16), stream=stream0)
del primals_12
return (reinterpret_tensor(buf7, (4, 2, 1, 1), (2, 1, 1, 1), 0), buf13, primals_1, primals_3, primals_4, primals_6, primals_8, primals_9, primals_11, buf5, reinterpret_tensor(buf6, (1, 1024, 62, 62), (3936256, 3844, 62, 1), 0), buf9, reinterpret_tensor(buf10, (1, 1024, 62, 62), (3936256, 3844, 62, 1), 0), reinterpret_tensor(buf11, (4, 4, 1, 1), (4, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1024, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def xcorr_fast(x, kernel):
"""group conv2d to calculate cross correlation, fast version
"""
batch = kernel.size()[0]
pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3])
px = x.view(1, -1, x.size()[2], x.size()[3])
po = F.conv2d(px, pk, groups=batch)
po = po.view(batch, -1, po.size()[2], po.size()[3])
return po
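# Hedged reference (editor's addition, not part of the original repo): the
# grouped conv2d above should match a naive per-sample loop, which makes the
# trick easier to see. Illustration only.
def xcorr_slow_reference(x, kernel):
    """Per-sample cross-correlation; expected to equal xcorr_fast(x, kernel)."""
    out = []
    for i in range(kernel.size(0)):
        # each batch element is correlated with its own kernel slice
        pk = kernel[i].view(-1, x.size(1), kernel.size(2), kernel.size(3))
        out.append(F.conv2d(x[i:i + 1], pk))
    return torch.cat(out, dim=0)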
class BAN(nn.Module):
def __init__(self):
super(BAN, self).__init__()
def forward(self, z_f, x_f):
raise NotImplementedError
class UPChannelBAN(BAN):
def __init__(self, feature_in=256, cls_out_channels=2):
super(UPChannelBAN, self).__init__()
cls_output = cls_out_channels
loc_output = 4
self.template_cls_conv = nn.Conv2d(feature_in, feature_in *
cls_output, kernel_size=3)
self.template_loc_conv = nn.Conv2d(feature_in, feature_in *
loc_output, kernel_size=3)
self.search_cls_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3)
self.search_loc_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3)
self.loc_adjust = nn.Conv2d(loc_output, loc_output, kernel_size=1)
def forward(self, z_f, x_f):
cls_kernel = self.template_cls_conv(z_f)
loc_kernel = self.template_loc_conv(z_f)
cls_feature = self.search_cls_conv(x_f)
loc_feature = self.search_loc_conv(x_f)
cls = xcorr_fast(cls_feature, cls_kernel)
loc = self.loc_adjust(xcorr_fast(loc_feature, loc_kernel))
return cls, loc
def get_inputs():
return [torch.rand([4, 256, 64, 64]), torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_view_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x1 = xindex // 3844 % 512
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3844 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_view_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x1 = xindex // 3844 % 1024
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_4, (1024, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (1024,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 512, 62, 62), (1968128, 3844, 62, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1024, 62, 62), (3936256, 3844, 62, 1))
buf2 = extern_kernels.convolution(primals_8, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 62, 62), (984064, 3844, 62, 1))
buf3 = extern_kernels.convolution(primals_8, primals_9, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 62, 62), (984064, 3844, 62, 1))
buf4 = buf0
del buf0
buf5 = reinterpret_tensor(buf4, (8, 256, 62, 62), (984064, 3844, 62,
1), 0)
del buf4
get_raw_stream(0)
triton_poi_fused_convolution_view_0[grid(7872512)](buf5, primals_2,
7872512, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf6 = buf2
del buf2
triton_poi_fused_convolution_1[grid(3936256)](buf6, primals_7,
3936256, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 1024,
62, 62), (0, 3844, 62, 1), 0), buf5, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf7, (1, 8, 1, 1), (8, 1, 1, 1))
buf8 = buf1
del buf1
buf9 = reinterpret_tensor(buf8, (16, 256, 62, 62), (984064, 3844,
62, 1), 0)
del buf8
triton_poi_fused_convolution_view_2[grid(15745024)](buf9, primals_5,
15745024, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf10 = buf3
del buf3
triton_poi_fused_convolution_1[grid(3936256)](buf10, primals_10,
3936256, XBLOCK=512, num_warps=8, num_stages=1)
del primals_10
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (1,
1024, 62, 62), (0, 3844, 62, 1), 0), buf9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf11, (1, 16, 1, 1), (16, 1, 1, 1))
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4,
1, 1), (4, 1, 1, 1), 0), primals_11, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 1, 1), (4, 1, 1, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_3[grid(16)](buf13, primals_12, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_12
return (reinterpret_tensor(buf7, (4, 2, 1, 1), (2, 1, 1, 1), 0), buf13,
primals_1, primals_3, primals_4, primals_6, primals_8, primals_9,
primals_11, buf5, reinterpret_tensor(buf6, (1, 1024, 62, 62), (
3936256, 3844, 62, 1), 0), buf9, reinterpret_tensor(buf10, (1, 1024,
62, 62), (3936256, 3844, 62, 1), 0), reinterpret_tensor(buf11, (4,
4, 1, 1), (4, 1, 1, 1), 0))
def xcorr_fast(x, kernel):
"""group conv2d to calculate cross correlation, fast version
"""
batch = kernel.size()[0]
pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3])
px = x.view(1, -1, x.size()[2], x.size()[3])
po = F.conv2d(px, pk, groups=batch)
po = po.view(batch, -1, po.size()[2], po.size()[3])
return po
class BAN(nn.Module):
def __init__(self):
super(BAN, self).__init__()
def forward(self, z_f, x_f):
raise NotImplementedError
class UPChannelBANNew(BAN):
def __init__(self, feature_in=256, cls_out_channels=2):
super(UPChannelBANNew, self).__init__()
cls_output = cls_out_channels
loc_output = 4
self.template_cls_conv = nn.Conv2d(feature_in, feature_in *
cls_output, kernel_size=3)
self.template_loc_conv = nn.Conv2d(feature_in, feature_in *
loc_output, kernel_size=3)
self.search_cls_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3)
self.search_loc_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3)
self.loc_adjust = nn.Conv2d(loc_output, loc_output, kernel_size=1)
def forward(self, input_0, input_1):
primals_1 = self.template_cls_conv.weight
primals_2 = self.template_cls_conv.bias
primals_4 = self.template_loc_conv.weight
primals_5 = self.template_loc_conv.bias
primals_6 = self.search_cls_conv.weight
primals_7 = self.search_cls_conv.bias
primals_9 = self.search_loc_conv.weight
primals_10 = self.search_loc_conv.bias
primals_11 = self.loc_adjust.weight
primals_12 = self.loc_adjust.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0], output[1]
|
Edwardsoft/siamban
|
UPChannelBAN
| false | 9,207 |
[
"Apache-2.0"
] | 0 |
f89e70485437fa240bcf4ee4929e3cb6d5211ebc
|
https://github.com/Edwardsoft/siamban/tree/f89e70485437fa240bcf4ee4929e3cb6d5211ebc
|
MultiheadAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
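# Editor's note (hedged): the constant 1.0 in the kernel above is the query
# scaling self.scaling = self.head_dim ** -0.5 baked in at trace time; with
# embed_dim=4 and num_heads=4, head_dim == 1, so the scale is exactly 1.0.
# In eager code this corresponds to, roughly:
#
#     q = F.linear(query, W_q, b_q)   # the mm above plus the bias add here
#     q = q * (head_dim ** -0.5)      # the multiply fused into this kernel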
# kernel path: runs/run_shard_8/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
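# Editor's note (hedged): triton_per_fused__softmax_1 is the numerically
# stable softmax over the last dimension, matching the graph fragment:
#
#     m = x.max(dim=-1, keepdim=True).values   # amax
#     e = (x - m).exp()                        # sub, exp
#     y = e / e.sum(dim=-1, keepdim=True)      # sum_1, div
#
# Subtracting the row max before exp() prevents overflow and leaves the
# result unchanged, because softmax is invariant to a per-row shift.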
# kernel path: runs/run_shard_8/inductor_cache/qa/cqazar4hg4rdjbxm7zr5mix2w3dkhfmvvjksn7c6lktr5yfe6ndy.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
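# Editor's note (hedged): the clone kernel above materializes
# attn.transpose(0, 1).contiguous(): it reads with stride 4 (y0 + 4 * x1)
# and writes row-major (x1 + 16 * y0), swapping the first two axes so the
# following addmm sees a contiguous (16, 4) matrix.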
# kernel path: runs/run_shard_8/inductor_cache/c2/cc2wsialcqiknwetscnqy3fzaqmmib3cxfb7tsfjx7hdlsxbdq7s.py
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# attn_weights_4 => div_1
# sum_1 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_17, [1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
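# Editor's note (hedged): this kernel is the head average of the attention
# weights, i.e. the eager-mode
#
#     attn_weights.view(bsz, num_heads, tgt_len, src_len).sum(dim=1) / num_heads
#
# The four strided loads (offsets 0, 64, 128, 192) walk the head axis for a
# fixed (tgt, src) position, and multiplying by 0.25 performs the division
# by num_heads = 4.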
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf8, buf9, 4, 16, grid=grid(4, 16), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_3.run(buf7, buf11, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
from torch import nn
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
incremental_state=None, need_weights=True, static_kv=False,
attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
if static_kv:
assert kv_same and not qkv_same
key = value = None
else:
saved_state = None
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1)
], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if saved_state is not None:
if 'prev_key' in saved_state:
prev_key = saved_state['prev_key'].view(bsz * self.
num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
k = torch.cat((prev_key, k), dim=1)
if 'prev_value' in saved_state:
prev_value = saved_state['prev_value'].view(bsz * self.
num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
v = torch.cat((prev_value, v), dim=1)
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.
head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1,
self.head_dim)
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask, torch.zeros
(key_padding_mask.size(0), 1).type_as(key_padding_mask)
], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
if self.onnx_trace:
attn_weights = torch.where(key_padding_mask.unsqueeze(1).
unsqueeze(2), torch.Tensor([float('-Inf')]),
attn_weights.float()).type_as(attn_weights)
else:
attn_weights = attn_weights.float().masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')
).type_as(attn_weights)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=
self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz,
embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
else:
attn_weights = None
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
    def _in_proj(self, input, start=0, end=None):
        # Slice the packed in-projection: rows [start:end) of the
        # (3 * embed_dim, embed_dim) weight pick out the q, k, or v block,
        # so one parameter tensor serves all three projections.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
    def _get_input_buffer(self, incremental_state):
        # NOTE: `utils` (fairseq-style incremental-state helpers) is not
        # imported in this extract; it is assumed to be supplied by the
        # surrounding framework.
        return utils.get_incremental_state(self, incremental_state,
            'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
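# Hedged usage sketch (editor's addition): shapes follow the docstring's
# Time x Batch x Channel convention; the values below are illustrative only.
def example_self_attention():
    mha = MultiheadAttention(embed_dim=4, num_heads=4)
    x = torch.rand(5, 2, 4)            # (tgt_len, bsz, embed_dim)
    attn, weights = mha(x, x, x)       # q = k = v triggers the qkv_same path
    assert attn.shape == (5, 2, 4)     # same layout as the query
    assert weights.shape == (2, 5, 5)  # per-batch weights, averaged over heads
    return attn, weights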
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch import nn
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state,
'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
Ivan-Dimitrov/ml_systems_code_pruning
|
MultiheadAttention
| false | 9,208 |
[
"BSD-3-Clause"
] | 0 |
54cc9f35a87e52c1fef870b7cb54cb03239d5c96
|
https://github.com/Ivan-Dimitrov/ml_systems_code_pruning/tree/54cc9f35a87e52c1fef870b7cb54cb03239d5c96
|
SetConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py
# Topologically Sorted Source Nodes: [hid_flow], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# hid_flow => relu
# Graph fragment:
# %add_tensor_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_9, %primals_9), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_9,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/27/c27numy7223mn467xcmhi3zmthhvpapnsisc2bsslgrnbfln4g26.py
# Topologically Sorted Source Nodes: [hid_sample_1, hid_sample_2, hid_sample_3], Original ATen: [aten.relu, aten.mul, aten.sum, aten.threshold_backward]
# Source node to ATen node mapping:
# hid_sample_1 => relu_3
# hid_sample_2 => mul
# hid_sample_3 => sum_1
# Graph fragment:
# %add_tensor_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_6, %primals_15), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_6,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_3, %primals_4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_mul_relu_sum_threshold_backward_1 = async_compile.triton('triton_poi_fused_mul_relu_sum_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sum_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_sum_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp25 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tmp18 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = triton_helpers.maximum(tmp3, tmp26)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(out_ptr0 + (x2), tmp24, xmask)
tl.store(out_ptr1 + (x2), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/nq/cnqeoxvzay3gaagawonyeds237qlv4mrbuw4kd7gawympuodtu2a.py
# Topologically Sorted Source Nodes: [hid], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# hid => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze, %squeeze_1, %squeeze_2, %relu_1], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (4 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr1 + (8 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr1 + (12 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 8, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + ((16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (4 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr3 + (8 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr3 + (12 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp20 / tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp19, tmp28, tmp29)
tmp31 = tmp0 >= tmp17
tmp32 = tl.full([1], 12, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr4 + ((4*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr5 + ((16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tl.load(in_ptr5 + (4 + (16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr5 + (8 + (16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tl.load(in_ptr5 + (12 + (16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tmp40 + tmp41
tmp43 = tmp35 / tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp34, tmp43, tmp44)
tmp46 = tmp0 >= tmp32
tmp47 = tl.full([1], 16, tl.int64)
tmp48 = tmp0 < tmp47
tmp49 = tl.load(in_ptr6 + ((4*x1) + ((-12) + x0)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr7 + ((-12) + x0), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full([1], 0, tl.int32)
tmp53 = triton_helpers.maximum(tmp52, tmp51)
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp46, tmp53, tmp54)
tmp56 = tl.where(tmp34, tmp45, tmp55)
tmp57 = tl.where(tmp19, tmp30, tmp56)
tmp58 = tl.where(tmp4, tmp15, tmp57)
tl.store(out_ptr0 + (x2), tmp58, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rh/crhvyy3w3uejbzndu7qftnyc25sndrfzlmb3i2bzpyadobz7z7bm.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_27), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ax/caxgsevarnzbzwpagpzixoy6s47ewanoqoh2ibp2x337koawfiec.py
# Topologically Sorted Source Nodes: [hid_flow_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hid_flow_1 => relu_1
# Graph fragment:
# %add_tensor_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_8, %primals_11), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_8,), kwargs = {})
# %le_7 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4, ), (1, ))
assert_size_stride(primals_22, (4, 4), (4, 1))
assert_size_stride(primals_23, (4, ), (1, ))
assert_size_stride(primals_24, (4, 16), (16, 1))
assert_size_stride(primals_25, (4, ), (1, ))
assert_size_stride(primals_26, (1, 4), (4, 1))
assert_size_stride(primals_27, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_7, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf0)
del primals_8
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [hid_flow], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_9, 16, grid=grid(16), stream=stream0)
del primals_9
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf3)
del primals_12
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [hid_sample], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf4, primals_13, 16, grid=grid(16), stream=stream0)
del primals_13
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_sample_1, hid_sample_2, hid_sample_3], Original ATen: [aten.relu, aten.mul, aten.sum, aten.threshold_backward]
triton_poi_fused_mul_relu_sum_threshold_backward_1.run(buf5, primals_15, primals_4, buf6, buf22, 16, grid=grid(16), stream=stream0)
del primals_15
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_2, reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf7)
del primals_16
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [hid_predicate], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf8, primals_17, 16, grid=grid(16), stream=stream0)
del primals_17
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_predicate_1, hid_predicate_2, hid_predicate_3], Original ATen: [aten.relu, aten.mul, aten.sum, aten.threshold_backward]
triton_poi_fused_mul_relu_sum_threshold_backward_1.run(buf9, primals_19, primals_5, buf10, buf21, 16, grid=grid(16), stream=stream0)
del primals_19
buf11 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf11)
del primals_20
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [hid_join], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf12, primals_21, 16, grid=grid(16), stream=stream0)
del primals_21
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf12, reinterpret_tensor(primals_22, (4, 4), (1, 4), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_join_1, hid_join_2, hid_join_3], Original ATen: [aten.relu, aten.mul, aten.sum, aten.threshold_backward]
triton_poi_fused_mul_relu_sum_threshold_backward_1.run(buf13, primals_23, primals_6, buf14, buf20, 16, grid=grid(16), stream=stream0)
del buf13
del primals_23
buf15 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf6, primals_4, buf10, primals_5, buf14, primals_6, buf2, primals_11, buf15, 64, grid=grid(64), stream=stream0)
del buf10
del buf14
buf16 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, reinterpret_tensor(primals_24, (16, 4), (1, 16), 0), out=buf16)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [hid_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf17, primals_25, 16, grid=grid(16), stream=stream0)
del primals_25
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf17, reinterpret_tensor(primals_26, (4, 1), (1, 4), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf19, primals_27, 4, grid=grid(4), stream=stream0)
del primals_27
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_flow_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf2, primals_11, buf23, 16, grid=grid(16), stream=stream0)
del buf2
del primals_11
return (buf19, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf1, buf4, buf8, buf12, buf15, buf17, buf19, primals_26, primals_24, buf20, primals_22, buf21, primals_18, buf22, primals_14, buf23, primals_10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.nn.functional as F
class SetConv(nn.Module):
def __init__(self, sample_feats, predicate_feats, join_feats,
flow_feats, hid_units, num_hidden_layers=2):
super(SetConv, self).__init__()
self.flow_feats = flow_feats
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
if flow_feats > 0:
self.flow_mlp1 = nn.Linear(flow_feats, hid_units)
self.flow_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 4, hid_units)
else:
self.out_mlp1 = nn.Linear(hid_units * 3, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, samples, predicates, joins, flows, sample_mask,
predicate_mask, join_mask):
"""
#TODO: describe shapes
"""
        if self.flow_feats:
            hid_flow = F.relu(self.flow_mlp1(flows))
            hid_flow = F.relu(self.flow_mlp2(hid_flow))
hid_sample = F.relu(self.sample_mlp1(samples))
hid_sample = F.relu(self.sample_mlp2(hid_sample))
hid_sample = hid_sample * sample_mask
hid_sample = torch.sum(hid_sample, dim=1, keepdim=False)
sample_norm = sample_mask.sum(1, keepdim=False)
hid_sample = hid_sample / sample_norm
hid_predicate = F.relu(self.predicate_mlp1(predicates))
hid_predicate = F.relu(self.predicate_mlp2(hid_predicate))
hid_predicate = hid_predicate * predicate_mask
hid_predicate = torch.sum(hid_predicate, dim=1, keepdim=False)
predicate_norm = predicate_mask.sum(1, keepdim=False)
hid_predicate = hid_predicate / predicate_norm
hid_join = F.relu(self.join_mlp1(joins))
hid_join = F.relu(self.join_mlp2(hid_join))
hid_join = hid_join * join_mask
hid_join = torch.sum(hid_join, dim=1, keepdim=False)
join_norm = join_mask.sum(1, keepdim=False)
hid_join = hid_join / join_norm
assert hid_sample.shape == hid_predicate.shape == hid_join.shape
hid_sample = hid_sample.squeeze()
hid_predicate = hid_predicate.squeeze()
hid_join = hid_join.squeeze()
if self.flow_feats:
hid = torch.cat((hid_sample, hid_predicate, hid_join, hid_flow), 1)
else:
hid = torch.cat((hid_sample, hid_predicate, hid_join), 1)
hid = F.relu(self.out_mlp1(hid))
out = torch.sigmoid(self.out_mlp2(hid))
return out
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'sample_feats': 4, 'predicate_feats': 4, 'join_feats': 4,
'flow_feats': 4, 'hid_units': 4}]
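# A minimal CPU smoke test (illustrative only; it reuses the helper functions
# above, whose argument order matches forward()):
if __name__ == "__main__":
    model = SetConv(**get_init_inputs()[1])
    out = model(*get_inputs())
    print(out.shape)  # expected: torch.Size([4, 1])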
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
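# Fused bias-add + ReLU, applied in place: x <- max(0, x + bias), with the
# bias broadcast along the rows (x0 indexes the column within a 4-wide row).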
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
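# Fused bias-add + ReLU + masked sum-pool over the 4-element set dimension:
# out0[b, j] = sum_i relu(x[i, j] + bias[j]) * mask[b, i, j]; out1 records the
# ReLU threshold mask (activation <= 0) for the backward pass.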
@triton.jit
def triton_poi_fused_mul_relu_sum_threshold_backward_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp25 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tmp18 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = triton_helpers.maximum(tmp3, tmp26)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(out_ptr0 + x2, tmp24, xmask)
tl.store(out_ptr1 + x2, tmp29, xmask)
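# Concatenates four 4-wide blocks into the (4, 16) hidden state: blocks 0-2
# are the pooled features normalized by the per-column mask sums
# (pooled / mask.sum(1)); block 3 is relu(flow hidden + bias).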
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (16 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (4 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr1 + (8 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr1 + (12 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 8, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + (16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (4 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr3 + (8 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr3 + (12 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp20 / tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp19, tmp28, tmp29)
tmp31 = tmp0 >= tmp17
tmp32 = tl.full([1], 12, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr4 + (4 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr5 + (16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tl.load(in_ptr5 + (4 + 16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr5 + (8 + 16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tl.load(in_ptr5 + (12 + 16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp42 = tmp40 + tmp41
tmp43 = tmp35 / tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp34, tmp43, tmp44)
tmp46 = tmp0 >= tmp32
tl.full([1], 16, tl.int64)
tmp49 = tl.load(in_ptr6 + (4 * x1 + (-12 + x0)), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr7 + (-12 + x0), tmp46 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full([1], 0, tl.int32)
tmp53 = triton_helpers.maximum(tmp52, tmp51)
tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
tmp55 = tl.where(tmp46, tmp53, tmp54)
tmp56 = tl.where(tmp34, tmp45, tmp55)
tmp57 = tl.where(tmp19, tmp30, tmp56)
tmp58 = tl.where(tmp4, tmp15, tmp57)
tl.store(out_ptr0 + x2, tmp58, xmask)
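# Fused bias-add + sigmoid, in place on the (4, 1) output: out = sigmoid(x + bias).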
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
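# Recomputes relu(x + bias) only to record where the activation was clamped
# (<= 0); the boolean mask feeds the ReLU backward of the flow branch.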
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
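# Driver for the compiled forward: three masked sum-pool branches plus the
# flow MLP feed triton_poi_fused_cat_2, then out_mlp1 -> ReLU -> out_mlp2 ->
# sigmoid produce the (4, 1) output; the boolean buffers are saved for backward.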
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4, 4), (4, 1))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4, 16), (16, 1))
assert_size_stride(primals_25, (4,), (1,))
assert_size_stride(primals_26, (1, 4), (4, 1))
assert_size_stride(primals_27, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, reinterpret_tensor(primals_8, (4, 4),
(1, 4), 0), out=buf0)
del primals_8
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, primals_9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_12, (4, 4),
(1, 4), 0), out=buf3)
del primals_12
buf4 = buf3
del buf3
triton_poi_fused_relu_0[grid(16)](buf4, primals_13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_13
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_14, (4, 4), (1,
4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_mul_relu_sum_threshold_backward_1[grid(16)](buf5,
primals_15, primals_4, buf6, buf22, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_15
buf7 = buf5
del buf5
extern_kernels.mm(primals_2, reinterpret_tensor(primals_16, (4, 4),
(1, 4), 0), out=buf7)
del primals_16
buf8 = buf7
del buf7
triton_poi_fused_relu_0[grid(16)](buf8, primals_17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_17
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_18, (4, 4), (1,
4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_mul_relu_sum_threshold_backward_1[grid(16)](buf9,
primals_19, primals_5, buf10, buf21, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_19
buf11 = buf9
del buf9
extern_kernels.mm(primals_3, reinterpret_tensor(primals_20, (4, 4),
(1, 4), 0), out=buf11)
del primals_20
buf12 = buf11
del buf11
triton_poi_fused_relu_0[grid(16)](buf12, primals_21, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_21
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_22, (4, 4), (1,
4), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_mul_relu_sum_threshold_backward_1[grid(16)](buf13,
primals_23, primals_6, buf14, buf20, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf13
del primals_23
buf15 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_cat_2[grid(64)](buf6, primals_4, buf10, primals_5,
buf14, primals_6, buf2, primals_11, buf15, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf14
buf16 = buf6
del buf6
extern_kernels.mm(buf15, reinterpret_tensor(primals_24, (16, 4), (1,
16), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_0[grid(16)](buf17, primals_25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_25
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_26, (4, 1), (1,
4), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_sigmoid_3[grid(4)](buf19, primals_27, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_27
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(16)](buf2,
primals_11, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del primals_11
return (buf19, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, buf1, buf4, buf8, buf12, buf15, buf17, buf19,
primals_26, primals_24, buf20, primals_22, buf21, primals_18, buf22,
primals_14, buf23, primals_10)
class SetConvNew(nn.Module):
def __init__(self, sample_feats, predicate_feats, join_feats,
flow_feats, hid_units, num_hidden_layers=2):
super(SetConvNew, self).__init__()
self.flow_feats = flow_feats
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
if flow_feats > 0:
self.flow_mlp1 = nn.Linear(flow_feats, hid_units)
self.flow_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 4, hid_units)
else:
self.out_mlp1 = nn.Linear(hid_units * 3, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6):
primals_1 = self.sample_mlp1.weight
primals_9 = self.sample_mlp1.bias
primals_2 = self.sample_mlp2.weight
primals_11 = self.sample_mlp2.bias
primals_3 = self.predicate_mlp1.weight
primals_13 = self.predicate_mlp1.bias
primals_7 = self.predicate_mlp2.weight
primals_15 = self.predicate_mlp2.bias
primals_8 = self.join_mlp1.weight
primals_17 = self.join_mlp1.bias
primals_10 = self.join_mlp2.weight
primals_19 = self.join_mlp2.bias
primals_12 = self.flow_mlp1.weight
primals_21 = self.flow_mlp1.bias
primals_14 = self.flow_mlp2.weight
primals_23 = self.flow_mlp2.bias
primals_24 = self.out_mlp1.weight
primals_25 = self.out_mlp1.bias
primals_26 = self.out_mlp2.weight
primals_27 = self.out_mlp2.bias
primals_16 = input_0
primals_18 = input_1
primals_20 = input_2
primals_22 = input_3
primals_4 = input_4
primals_5 = input_5
primals_6 = input_6
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
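# A minimal CUDA smoke test (hypothetical; `_smoke_test` is not part of the
# generated module). The compiled graph expects the same seven inputs as the
# eager SetConv.forward above.
def _smoke_test():
    model = SetConvNew(4, 4, 4, 4, 4).cuda()
    args = [torch.rand(4, 4, device='cuda') for _ in range(4)]
    args += [torch.rand(4, 4, 4, device='cuda') for _ in range(3)]
    out = model(*args)
    assert out.shape == (4, 1)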
|
JonathanRaiman/CEB
|
SetConv
| false | 9,209 |
[
"MIT"
] | 0 |
ec5338dcaa939c5df36a47ea9d0895137b1e1b5e
|
https://github.com/JonathanRaiman/CEB/tree/ec5338dcaa939c5df36a47ea9d0895137b1e1b5e
|
Custom_dropout
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/w3/cw37r57zplwba5pgsrchxcoapbb4gzt5meaqi2jt7d3cdzwgojtc.py
# Topologically Sorted Source Nodes: [mask, mul], Original ATen: [aten.repeat, aten.mul]
# Source node to ATen node mapping:
# mask => repeat
# mul => mul
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view, [1, 4]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%repeat, %arg1_1), kwargs = {})
triton_poi_fused_mul_repeat_0 = async_compile.triton('triton_poi_fused_mul_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, ), (1, ))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mask, mul], Original ATen: [aten.repeat, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_repeat_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.parallel
class Custom_dropout(nn.Module):
"""
An implementation for few , Given a task perform a rowise sum of 2-d
matrix , you get a zero out the contribution of few of rows in the matrix
Given, X a 2-d matrix consisting of row vectors (1-d) x1 , x2 ,..xn.
Sum = x1 + 0.x2 + .. + 0.xi + .. +xn
"""
def __init__(self, dp_rate: 'float', n_permutation: 'int'):
"""
Parameters
----------
dp_rate: float
p value of dropout.
"""
super(Custom_dropout, self).__init__()
self.dropout = nn.Dropout(p=dp_rate)
self.ones = nn.Parameter(torch.ones(n_permutation), requires_grad=False
)
def forward(self, layer):
"""
Returns
-------
node_feats: torch.Tensor
Updated tensor.
"""
mask = self.dropout(self.ones).view(layer.shape[0], 1).repeat(1,
layer.shape[1])
return mask * layer
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dp_rate': 0.5, 'n_permutation': 4}]
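# A minimal sketch of the row-wise masking (illustrative only): in training
# mode each row is either kept and scaled by 1/(1 - p) or zeroed whole.
if __name__ == "__main__":
    drop = Custom_dropout(dp_rate=0.5, n_permutation=3)
    x = torch.ones(3, 5)
    print(drop(x))  # each row is all 2.0 (kept, scaled) or all 0.0 (dropped)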
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
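# Broadcast multiply out = mask * x: x1 picks the mask entry for the
# second-to-last dimension of the contiguous (4, 4, 4, 4) input.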
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4,), (1,))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_repeat_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class Custom_dropoutNew(nn.Module):
"""
An implementation for few , Given a task perform a rowise sum of 2-d
matrix , you get a zero out the contribution of few of rows in the matrix
Given, X a 2-d matrix consisting of row vectors (1-d) x1 , x2 ,..xn.
Sum = x1 + 0.x2 + .. + 0.xi + .. +xn
"""
def __init__(self, dp_rate: 'float', n_permutation: 'int'):
"""
Parameters
----------
dp_rate: float
p value of dropout.
"""
super(Custom_dropoutNew, self).__init__()
self.dropout = nn.Dropout(p=dp_rate)
self.ones = nn.Parameter(torch.ones(n_permutation), requires_grad=False
)
def forward(self, input_0):
arg0_1 = self.ones
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
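# A minimal CUDA usage sketch (hypothetical; `_smoke_test` is not part of the
# generated module). Note this is the inference graph: dropout was elided, so
# the mask is simply self.ones broadcast over the last two dimensions.
def _smoke_test():
    drop = Custom_dropoutNew(dp_rate=0.5, n_permutation=4).cuda()
    out = drop(torch.rand(4, 4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4, 4)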
|
JoseAntonioSiguenza/deepchem
|
Custom_dropout
| false | 9,210 |
[
"MIT"
] | 0 |
05fe1b186ec154e18de9aa1b110e9258dc484e21
|
https://github.com/JoseAntonioSiguenza/deepchem/tree/05fe1b186ec154e18de9aa1b110e9258dc484e21
|
TwoLayerCNN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/q5/cq5wusjiorttrifgkbgmb575ri5bohmulexkpd7lpcdrnw7myr2f.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/q2/cq2ab4zm7eyvynktpy632nombwctzki5grvf6od7jvr4qq7wwtqo.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 2], [1, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr1 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf5, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.int8)
buf3 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 32, grid=grid(32), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (4, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_4
return (buf4, primals_2, primals_1, reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 8), (8, 1), 0), primals_3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TwoLayerCNN(nn.Module):
def __init__(self, C, M, embedding, channel, mtc_input):
super(TwoLayerCNN, self).__init__()
self.C = C
self.M = M
self.embedding = embedding
self.mtc_input = C if mtc_input else 1
self.conv1 = nn.Conv1d(self.mtc_input, channel, 3, 1, padding=1,
bias=False)
self.flat_size = M // 2 * C // self.mtc_input * channel
self.fc1 = nn.Linear(self.flat_size, embedding)
def forward(self, x):
N = len(x)
x = x.view(-1, self.mtc_input, self.M)
x = F.relu(self.conv1(x))
x = F.max_pool1d(x, 2)
x = x.view(N, self.flat_size)
x = self.fc1(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'C': 4, 'M': 4, 'embedding': 4, 'channel': 4, 'mtc_input': 4}]
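A minimal shape walk-through of the forward pass above, using the sample config from get_init_inputs (C=4, M=4, embedding=4, channel=4, mtc_input=4); this sketch re-derives the shapes with plain torch ops and illustrative weight names, and is not part of the original module:
import torch
import torch.nn.functional as F
x = torch.rand(4, 4, 4)                     # (N, C, M) as in get_inputs()
x = x.view(-1, 4, 4)                        # mtc_input = C = 4 -> (4, 4, 4)
conv_w = torch.rand(4, 4, 3)                # (channel, mtc_input, kernel=3); conv1 has bias=False
x = F.relu(F.conv1d(x, conv_w, padding=1))  # padding=1 keeps length: (4, 4, 4)
x = F.max_pool1d(x, 2)                      # halves M: (4, 4, 2)
x = x.view(4, -1)                           # flat_size = M//2 * C//mtc_input * channel = 8
assert x.shape == (4, 8)
out = x @ torch.rand(8, 4) + torch.rand(4)  # fc1: Linear(8, 4) -> (4, 4)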
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr1 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.int8)
buf3 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(32)](buf1, buf2,
buf3, 32, XBLOCK=32, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (4, 8), (8,
1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha
=1, beta=1, out=buf4)
del primals_4
return buf4, primals_2, primals_1, reinterpret_tensor(buf1, (4, 4, 1, 4
), (16, 4, 4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 8), (8, 1), 0
), primals_3, buf5
class TwoLayerCNNNew(nn.Module):
def __init__(self, C, M, embedding, channel, mtc_input):
super(TwoLayerCNNNew, self).__init__()
self.C = C
self.M = M
self.embedding = embedding
self.mtc_input = C if mtc_input else 1
self.conv1 = nn.Conv1d(self.mtc_input, channel, 3, 1, padding=1,
bias=False)
self.flat_size = M // 2 * C // self.mtc_input * channel
self.fc1 = nn.Linear(self.flat_size, embedding)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
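A hedged equivalence check between the eager module and the compiled wrapper; it assumes a CUDA device (call() allocates its buffers on cuda:0) and that both class definitions from this entry are in scope:
import torch
eager = TwoLayerCNN(4, 4, 4, 4, 4).cuda()
compiled = TwoLayerCNNNew(4, 4, 4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # same submodules, same keys
x = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x))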
|
LFhase/string-embed
|
TwoLayerCNN
| false | 9,211 |
[
"MIT"
] | 0 |
da8eb60186fcd26a94734f265f79fa5fc5096f76
|
https://github.com/LFhase/string-embed/tree/da8eb60186fcd26a94734f265f79fa5fc5096f76
|
Shifted_softplus
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/fk/cfkre7tbjeltmedfz7lb2anfxujrt4eo3nj6bes7djl54knc2yvv.py
# Topologically Sorted Source Nodes: [softplus, node_feats], Original ATen: [aten.softplus, aten.sub]
# Source node to ATen node mapping:
# node_feats => sub
# softplus => div, exp, gt, log1p, mul, where
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %div), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %arg1_1), kwargs = {})
triton_poi_fused_softplus_sub_0 = async_compile.triton('triton_poi_fused_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp9 = tl.load(in_ptr1 + (0))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
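    # Numerically stable softplus: where beta*x (tmp2) exceeds the threshold 20.0,
    # exp() would overflow, so the input is passed through unchanged in that branch,
    # matching nn.Softplus(beta=1.0, threshold=20.0).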
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tmp11 = tmp8 - tmp10
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, node_feats], Original ATen: [aten.softplus, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.parallel
class Shifted_softplus(nn.Module):
"""
    Shifted softplus activation: shifts the softplus output down by log(2) (~0.6931)
"""
def __init__(self):
super(Shifted_softplus, self).__init__()
self.act = nn.Softplus()
self.shift = nn.Parameter(torch.tensor([0.6931]), False)
def forward(self, X):
"""
        Applies the shifted softplus activation function.
        Parameters
        ----------
        X: torch.Tensor
            The input node features.
Returns
-------
node_feats: torch.Tensor
The updated node features.
"""
node_feats = self.act(X) - self.shift
return node_feats
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
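A small numeric check of the shift constant above: softplus(0) = log(2), so subtracting the stored shift (~0.6931) makes the activation pass through the origin; illustrative sketch only:
import math
import torch
import torch.nn.functional as F
x = torch.linspace(-3.0, 3.0, 7)
y = F.softplus(x) - math.log(2.0)   # same constant as self.shift
assert torch.allclose(y[3], torch.tensor(0.0), atol=1e-4)  # x = 0 maps to ~0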
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
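    # Stable softplus: above the 20.0 threshold the input is passed through
    # unchanged to avoid overflow in exp(), matching nn.Softplus defaults.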
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tmp11 = tmp8 - tmp10
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class Shifted_softplusNew(nn.Module):
"""
    Shifted softplus activation: shifts the softplus output down by log(2) (~0.6931)
"""
def __init__(self):
super(Shifted_softplusNew, self).__init__()
self.act = nn.Softplus()
self.shift = nn.Parameter(torch.tensor([0.6931]), False)
def forward(self, input_0):
arg1_1 = self.shift
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
JoseAntonioSiguenza/deepchem
|
Shifted_softplus
| false | 9,212 |
[
"MIT"
] | 0 |
05fe1b186ec154e18de9aa1b110e9258dc484e21
|
https://github.com/JoseAntonioSiguenza/deepchem/tree/05fe1b186ec154e18de9aa1b110e9258dc484e21
|
GraphConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py
# Topologically Sorted Source Nodes: [cat_feats], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_feats => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %bmm], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xk/cxkfjvxcrwrocrik25vel4gb2spp4jrbijo33ra4mgkw3hn2qgah.py
# Topologically Sorted Source Nodes: [add, out_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# out_1 => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_feats], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, buf1, 128, grid=grid(128), stream=stream0)
del primals_1
buf2 = reinterpret_tensor(buf0, (1, 16, 4), (64, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 8), (0, 8, 1), 0), reinterpret_tensor(primals_3, (1, 8, 4), (32, 4, 1), 0), out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add, out_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf3, primals_4, buf4, 64, grid=grid(64), stream=stream0)
del primals_4
return (buf3, buf4, reinterpret_tensor(buf1, (1, 8, 16), (128, 1, 8), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class MeanAggregator(nn.Module):
def forward(self, features, A):
x = torch.bmm(A, features)
return x
class GraphConv(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim))
self.bias = nn.Parameter(torch.FloatTensor(out_dim))
init.xavier_uniform_(self.weight)
init.constant_(self.bias, 0)
self.aggregator = MeanAggregator()
def forward(self, features, A):
_b, _n, d = features.shape
assert d == self.in_dim
agg_feats = self.aggregator(features, A)
cat_feats = torch.cat([features, agg_feats], dim=2)
out = torch.einsum('bnd,df->bnf', cat_feats, self.weight)
out = F.relu(out + self.bias)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
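The einsum 'bnd,df->bnf' above is a plain matmul over the flattened batch, which is exactly how the Inductor graph lowers it (the bmm over the reinterpreted (1, 16, 8) view below); a CPU sketch with illustrative names:
import torch
feats = torch.rand(4, 4, 4)           # (B, N, D)
A = torch.rand(4, 4, 4)               # (B, N, N) transition/adjacency matrix
W = torch.rand(8, 4)                  # (2D, out_dim)
agg = torch.bmm(A, feats)             # MeanAggregator
cat = torch.cat([feats, agg], dim=2)  # (B, N, 2D)
out1 = torch.einsum('bnd,df->bnf', cat, W)
out2 = (cat.reshape(-1, 8) @ W).reshape(4, 4, 4)  # same contraction, flattened
torch.testing.assert_close(out1, out2)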
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, buf1, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf2 = reinterpret_tensor(buf0, (1, 16, 4), (64, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 8), (0, 8, 1),
0), reinterpret_tensor(primals_3, (1, 8, 4), (32, 4, 1), 0),
out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf3,
primals_4, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return buf3, buf4, reinterpret_tensor(buf1, (1, 8, 16), (128, 1, 8), 0)
class MeanAggregator(nn.Module):
def forward(self, features, A):
x = torch.bmm(A, features)
return x
class GraphConvNew(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim))
self.bias = nn.Parameter(torch.FloatTensor(out_dim))
init.xavier_uniform_(self.weight)
init.constant_(self.bias, 0)
self.aggregator = MeanAggregator()
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
HolyCrap96/mmocr-1
|
GraphConv
| false | 9,213 |
[
"Apache-2.0"
] | 0 |
c6c4acd39b1c56fec1b87530b2d241fe8af4ceed
|
https://github.com/HolyCrap96/mmocr-1/tree/c6c4acd39b1c56fec1b87530b2d241fe8af4ceed
|
CmapPafHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf1, buf3, primals_1, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn
import torch.optim
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
layers += [torch.nn.ConvTranspose2d(inch, output_channels,
kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
output_channels), torch.nn.ReLU()]
for i in range(num_flat):
layers += [torch.nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1), torch.nn.
BatchNorm2d(output_channels), torch.nn.ReLU()]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHead(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHead, self).__init__()
if num_upsample > 0:
self.cmap_conv = torch.nn.Sequential(UpsampleCBR(input_channels,
upsample_channels, num_upsample, num_flat), torch.nn.Conv2d
(upsample_channels, cmap_channels, kernel_size=1, stride=1,
padding=0))
self.paf_conv = torch.nn.Sequential(UpsampleCBR(input_channels,
upsample_channels, num_upsample, num_flat), torch.nn.Conv2d
(upsample_channels, paf_channels, kernel_size=1, stride=1,
padding=0))
else:
self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x):
return self.cmap_conv(x), self.paf_conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
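For the sample config (num_upsample=0) the head reduces to two independent 1x1 convolutions over a shared feature map; a minimal eager sketch, with illustrative names:
import torch
import torch.nn as nn
cmap_conv = nn.Conv2d(4, 4, kernel_size=1)
paf_conv = nn.Conv2d(4, 4, kernel_size=1)
x = torch.rand(4, 4, 4, 4)
cmap, paf = cmap_conv(x), paf_conv(x)   # both heads read the same input
assert cmap.shape == paf.shape == (4, 4, 4, 4)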
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf1, buf3, primals_1, primals_3, primals_4
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
layers += [torch.nn.ConvTranspose2d(inch, output_channels,
kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
output_channels), torch.nn.ReLU()]
for i in range(num_flat):
layers += [torch.nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1), torch.nn.
BatchNorm2d(output_channels), torch.nn.ReLU()]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHeadNew(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHeadNew, self).__init__()
if num_upsample > 0:
self.cmap_conv = torch.nn.Sequential(UpsampleCBR(input_channels,
upsample_channels, num_upsample, num_flat), torch.nn.Conv2d
(upsample_channels, cmap_channels, kernel_size=1, stride=1,
padding=0))
self.paf_conv = torch.nn.Sequential(UpsampleCBR(input_channels,
upsample_channels, num_upsample, num_flat), torch.nn.Conv2d
(upsample_channels, paf_channels, kernel_size=1, stride=1,
padding=0))
else:
self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.cmap_conv.weight
primals_2 = self.cmap_conv.bias
primals_4 = self.paf_conv.weight
primals_5 = self.paf_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
KeithStoke/POSE_Test
|
CmapPafHead
| false | 9,214 |
[
"MIT"
] | 0 |
581aaf6f3d4fd50e56aa16c43913292af7d36879
|
https://github.com/KeithStoke/POSE_Test/tree/581aaf6f3d4fd50e56aa16c43913292af7d36879
|
Atom_Wise_Convolution
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [node_feats_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# node_feats_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vn/cvnk354i6ugz6nckeqbdkwhdm372lrhj6gdqh2fio2ccs4vdjthr.py
# Topologically Sorted Source Nodes: [node_feats_1, softplus, node_feats_2], Original ATen: [aten.native_layer_norm, aten.softplus, aten.sub]
# Source node to ATen node mapping:
# node_feats_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# node_feats_2 => sub_1
# softplus => div, exp, gt, log1p, mul_2, where
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_2,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_2, 20.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_1, %div), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %primals_6), kwargs = {})
triton_poi_fused_native_layer_norm_softplus_sub_1 = async_compile.triton('triton_poi_fused_native_layer_norm_softplus_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_softplus_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_softplus_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr5 + (0))
tmp18 = tl.broadcast_to(tmp17, [XBLOCK])
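    # Fused per-element pipeline: LayerNorm normalize (mean and rsqrt computed by
    # the previous kernel) plus affine (in_ptr3/in_ptr4), then the numerically
    # stable softplus, then subtraction of the scalar shift broadcast from in_ptr5.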
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = 20.0
tmp12 = tmp10 > tmp11
tmp13 = tl_math.exp(tmp10)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp14 * tmp9
tmp16 = tl.where(tmp12, tmp8, tmp15)
tmp19 = tmp16 - tmp18
tl.store(in_out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [node_feats], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [node_feats_1], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [node_feats_1, softplus, node_feats_2], Original ATen: [aten.native_layer_norm, aten.softplus, aten.sub]
triton_poi_fused_native_layer_norm_softplus_sub_1.run(buf4, buf0, buf1, buf2, primals_4, primals_5, primals_6, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_6
return (buf4, primals_4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.parallel
class Shifted_softplus(nn.Module):
"""
    Shifted softplus activation: shifts the softplus output down by log(2) (~0.6931)
"""
def __init__(self):
super(Shifted_softplus, self).__init__()
self.act = nn.Softplus()
self.shift = nn.Parameter(torch.tensor([0.6931]), False)
def forward(self, X):
"""
        Applies the shifted softplus activation function.
        Parameters
        ----------
        X: torch.Tensor
            The input node features.
Returns
-------
node_feats: torch.Tensor
The updated node features.
"""
node_feats = self.act(X) - self.shift
return node_feats
class Atom_Wise_Convolution(nn.Module):
"""
    Applies an atom-wise (per-node) convolution to the node features
"""
def __init__(self, input_feature: 'int', output_feature: 'int', dropout:
'float'=0.2, UseBN: 'bool'=True):
"""
Parameters
----------
        input_feature: int
            Size of the input feature vector
        output_feature: int
            Size of the output feature vector
        dropout: float, default 0.2
            Dropout probability, between 0.0 and 1.0
        UseBN: bool
            Setting it to True applies normalisation (an nn.LayerNorm in this implementation)
"""
super(Atom_Wise_Convolution, self).__init__()
self.conv_weights = nn.Linear(input_feature, output_feature)
self.batch_norm = nn.LayerNorm(output_feature)
self.UseBN = UseBN
self.activation = Shifted_softplus()
self.dropout = nn.Dropout(p=dropout)
def forward(self, node_feats):
"""
Update node representations.
Parameters
----------
node_feats: torch.Tensor
The node features. The shape is `(N, Node_feature_size)`.
Returns
-------
node_feats: torch.Tensor
The updated node features. The shape is `(N, Node_feature_size)`.
"""
node_feats = self.conv_weights(node_feats)
if self.UseBN:
node_feats = self.batch_norm(node_feats)
node_feats = self.activation(node_feats)
node_feats = self.dropout(node_feats)
return node_feats
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_feature': 4, 'output_feature': 4}]
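An eager sketch of the pipeline above (Linear -> LayerNorm -> shifted softplus -> dropout) at the sample sizes; names are illustrative and dropout is shown in eval mode so the shape check is deterministic:
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
lin = nn.Linear(4, 4)
norm = nn.LayerNorm(4)                   # stored as self.batch_norm above
x = torch.rand(4, 4, 4, 4)
h = norm(lin(x))                         # conv_weights + normalisation
h = F.softplus(h) - math.log(2.0)        # Shifted_softplus
h = F.dropout(h, p=0.2, training=False)  # identity in eval mode
assert h.shape == x.shape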
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_softplus_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr5 + 0)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = 20.0
tmp12 = tmp10 > tmp11
tmp13 = tl_math.exp(tmp10)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp14 * tmp9
tmp16 = tl.where(tmp12, tmp8, tmp15)
tmp19 = tmp16 - tmp18
tl.store(in_out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3
del buf3
triton_poi_fused_native_layer_norm_softplus_sub_1[grid(256)](buf4,
buf0, buf1, buf2, primals_4, primals_5, primals_6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
del buf2
del primals_6
return buf4, primals_4, primals_5, reinterpret_tensor(primals_3, (64, 4
), (4, 1), 0), buf0
class Shifted_softplus(nn.Module):
"""
    Shifted softplus activation: shifts the softplus output down by log(2) (~0.6931)
"""
def __init__(self):
super(Shifted_softplus, self).__init__()
self.act = nn.Softplus()
self.shift = nn.Parameter(torch.tensor([0.6931]), False)
def forward(self, X):
"""
Applies the Activation function
Parameters
----------
node_feats: torch.Tensor
The node features.
Returns
-------
node_feats: torch.Tensor
The updated node features.
"""
node_feats = self.act(X) - self.shift
return node_feats
class Atom_Wise_ConvolutionNew(nn.Module):
"""
    Applies an atom-wise (per-node) convolution to the node features
"""
def __init__(self, input_feature: 'int', output_feature: 'int', dropout:
'float'=0.2, UseBN: 'bool'=True):
"""
Parameters
----------
input_feature: int
Size of input feature size
output_feature: int
Size of output feature size
        dropout: float, default 0.2
            Dropout probability, between 0.0 and 1.0
        UseBN: bool, default True
            Setting it to True applies the normalisation layer (a
            LayerNorm in this implementation)
"""
super(Atom_Wise_ConvolutionNew, self).__init__()
self.conv_weights = nn.Linear(input_feature, output_feature)
self.batch_norm = nn.LayerNorm(output_feature)
self.UseBN = UseBN
self.activation = Shifted_softplus()
self.dropout = nn.Dropout(p=dropout)
def forward(self, input_0):
primals_1 = self.conv_weights.weight
primals_2 = self.conv_weights.bias
primals_4 = self.batch_norm.weight
primals_5 = self.batch_norm.bias
primals_6 = self.activation.shift
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
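# Hedged usage sketch (added; assumes a CUDA device -- the compiled call()
# path asserts the exact (4, 4, 4, 4) contiguous input shape; helper name
# hypothetical):
def _demo_atom_wise_convolution():
    model = Atom_Wise_ConvolutionNew(4, 4).cuda()
    out = model(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # torch.Size([4, 4, 4, 4])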
|
JoseAntonioSiguenza/deepchem
|
Atom_Wise_Convolution
| false | 9,215 |
[
"MIT"
] | 0 |
05fe1b186ec154e18de9aa1b110e9258dc484e21
|
https://github.com/JoseAntonioSiguenza/deepchem/tree/05fe1b186ec154e18de9aa1b110e9258dc484e21
|
CrossUnit
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# z => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/yl/cyls3xe3v3b2kblyifcg6sp76moeiqvsl6sw4civ5jjxaxz5izp4.py
# Topologically Sorted Source Nodes: [add, relu_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# relu_1 => relu_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_3), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add, relu_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf3, primals_3, primals_5, buf4, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
from torch.nn import functional
class CrossUnit(nn.Module):
def __init__(self, input_dim, inner_dim, out_dim) ->None:
super().__init__()
self.fc_1 = nn.Linear(input_dim, inner_dim)
self.fc_2 = nn.Linear(inner_dim, out_dim)
self.align = input_dim == out_dim
if not self.align:
self.fc_3 = nn.Linear(input_dim, out_dim)
def forward(self, x):
z = self.fc_1(x).relu()
z = self.fc_2(z)
if not self.align:
x = self.fc_3(x)
return functional.relu(x + z)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'inner_dim': 4, 'out_dim': 4}]
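# Hedged usage sketch (added for illustration; the helper below is
# hypothetical): when input_dim != out_dim, CrossUnit routes the skip
# connection through the extra fc_3 projection so the residual addition
# stays shape-compatible.
def _demo_cross_unit():
    aligned = CrossUnit(4, 8, 4)     # identity skip: input_dim == out_dim
    projected = CrossUnit(4, 8, 16)  # skip goes through fc_3: 4 -> 16
    x = torch.rand(2, 4)
    print(aligned(x).shape)    # torch.Size([2, 4])
    print(projected(x).shape)  # torch.Size([2, 16])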
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
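# Note (added commentary): both kernels above fuse the linear bias add and
# ReLU with the boolean mask needed by autograd's threshold_backward --
# tmp4/tmp6 hold relu(x + bias) (plus the skip connection in the second
# kernel) and the stored int1 output marks positions where the activation
# is <= 0, i.e. where upstream gradients will be zeroed.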
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf3,
primals_3, primals_5, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class CrossUnitNew(nn.Module):
def __init__(self, input_dim, inner_dim, out_dim) ->None:
super().__init__()
self.fc_1 = nn.Linear(input_dim, inner_dim)
self.fc_2 = nn.Linear(inner_dim, out_dim)
self.align = input_dim == out_dim
if not self.align:
self.fc_3 = nn.Linear(input_dim, out_dim)
def forward(self, input_0):
primals_1 = self.fc_1.weight
primals_2 = self.fc_1.bias
primals_4 = self.fc_2.weight
primals_5 = self.fc_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
LSTM-Kirigaya/NUAA-guandan
|
CrossUnit
| false | 9,216 |
[
"MIT"
] | 0 |
f6920868649c26536b3dc3fce8ecd1d4f7c755fa
|
https://github.com/LSTM-Kirigaya/NUAA-guandan/tree/f6920868649c26536b3dc3fce8ecd1d4f7c755fa
|
RNN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ul/culvxc5xcnacfjypzxghwcyc2445sqsz25ci4rib6axjxs3fv3so.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_1 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/yr/cyr6fatjcqc5np3quy6arljtkkff4qjmueyb5b4pk5xvkxgrzuvd.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_1 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
del buf3
return (buf4, buf1, buf0, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input_size, hidden):
combined = torch.cat((input_size, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
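# Hedged usage sketch (added; the helper below is hypothetical): the
# classic char-RNN loop feeds the hidden state returned by one step back
# in at the next step. Note the first forward parameter is named
# `input_size` in the original source but actually receives the input
# tensor.
def _demo_rnn_unroll(seq_len=3):
    rnn = RNN(input_size=4, hidden_size=4, output_size=4)
    hidden = rnn.init_hidden()  # zeros of shape (1, hidden_size)
    output = None
    for _ in range(seq_len):
        x = torch.rand(1, 4)  # one time step, batch of 1
        output, hidden = rnn(x, hidden)
    print(output.shape)  # torch.Size([1, 4]) of log-probabilities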
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
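# Note (added commentary): the kernel above materialises
# torch.cat((input, hidden), dim=1) -- lanes with x0 < 4 copy from the
# input tensor and lanes with x0 >= 4 copy from the hidden state at
# column x0 - 4.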
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
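# Note (added commentary): the two kernels above form a numerically
# stable log-softmax in two passes over each row of 4 logits:
#   log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x))))
# kernel 1 subtracts the row maximum, kernel 2 the log-sum-exp.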
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
return buf4, buf1, buf0, buf4
class RNNNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNNNew, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
def forward(self, input_0, input_1):
primals_3 = self.i2h.weight
primals_4 = self.i2h.bias
primals_5 = self.i2o.weight
primals_6 = self.i2o.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
LatifB/char-level-classification
|
RNN
| false | 9,217 |
[
"MIT"
] | 0 |
3d0e21e85571efafe0e26c6f27c5fa258a9503da
|
https://github.com/LatifB/char-level-classification/tree/3d0e21e85571efafe0e26c6f27c5fa258a9503da
|
DownConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/it/citiw25o2bzpowghghyesgdfdm2lebsqwdmo6qg6foemy373lm2q.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 64, grid=grid(64), stream=stream0)
return (buf4, buf3, primals_1, primals_2, primals_3, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._utils
import torch.optim
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class DownConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 MaxPool.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels, pooling=True):
super(DownConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.pooling = pooling
self.conv1 = conv3x3(self.in_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
if self.pooling:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
before_pool = x
if self.pooling:
x = self.pool(x)
return x, before_pool
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
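# Hedged usage sketch (added; a typical U-Net encoder pattern, helper name
# hypothetical): the second return value is the full-resolution feature
# map kept for the decoder's skip connection.
def _demo_down_conv():
    enc = DownConv(4, 4, pooling=True)
    x = torch.rand(4, 4, 4, 4)
    pooled, skip = enc(x)
    print(pooled.shape)  # torch.Size([4, 4, 2, 2]) after 2x2 max pooling
    print(skip.shape)    # torch.Size([4, 4, 4, 4]), saved before pooling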
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch._utils
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
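# Note (added commentary): the kernel above fuses 2x2/stride-2 max pooling
# with index tracking -- tmp6 is the window maximum and tmp16 records
# which of the four window positions (0..3, stored as int8) produced it,
# matching the (values, indices) pair of max_pool2d_with_indices.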
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=256, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(256)](buf3, 256, XBLOCK=256, num_warps
=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(64)](buf3, buf4,
buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf4, buf3, primals_1, primals_2, primals_3, buf1, buf3, buf5
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class DownConvNew(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 MaxPool.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels, pooling=True):
super(DownConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.pooling = pooling
self.conv1 = conv3x3(self.in_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
if self.pooling:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
HenryOsborne/SemanticSegmentation
|
DownConv
| false | 9,218 |
[
"MIT"
] | 0 |
d41549c3fd22731d7a12cdb1b438f730b0ebfcbc
|
https://github.com/HenryOsborne/SemanticSegmentation/tree/d41549c3fd22731d7a12cdb1b438f730b0ebfcbc
|
Fp32GroupNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/dr/cdrbbq25gaacwdmuqqn76ytppvbzlwbqwo7aazovogwtjetsi3kf.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# output => add, add_1, mul_1, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
triton_per_fused_native_group_norm_0 = async_compile.triton('triton_per_fused_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + (64*x0)), tmp27, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_0.run(primals_1, primals_2, primals_3, buf0, buf3, buf4, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
return (buf3, primals_1, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf4, (4, 1, 1), (1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
import torch.distributed
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(input.float(), self.num_groups, self.weight.
float() if self.weight is not None else None, self.bias.float() if
self.bias is not None else None, self.eps)
return output.type_as(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_groups': 1, 'num_channels': 4}]
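# Hedged usage sketch (added; helper name hypothetical): the point of
# Fp32GroupNorm is that the statistics are computed in float32 even for
# half-precision inputs, and the result is cast back to the input dtype.
def _demo_fp32_group_norm():
    gn = Fp32GroupNorm(num_groups=1, num_channels=4)
    x = torch.rand(4, 4, 4, 4).half()
    y = gn(x)
    print(y.dtype)  # torch.float16 -- restored by output.type_as(input)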
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + 64 * x0), tmp27, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
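# The fused kernel above performs the whole group norm (num_groups=1, 64
# elements per group) in one pass per sample: out_ptr0 receives the group
# mean (tmp10), out_ptr3 the rsqrt(var + 1e-05) factor (tmp22), and out_ptr2
# the affine-normalized output. Mean and rsqrt are returned by call() below
# so they can be reused, e.g. by a backward pass.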
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(4)](primals_1, primals_2,
primals_3, buf0, buf3, buf4, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_2
del primals_3
return buf3, primals_1, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0
), reinterpret_tensor(buf4, (4, 1, 1), (1, 1, 1), 0)
class Fp32GroupNormNew(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
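# --- Hedged parity sketch (assumes a CUDA device; tolerance illustrative) ---
def _check_group_norm_parity():
    if not torch.cuda.is_available():
        return
    m = Fp32GroupNormNew(num_groups=1, num_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = nn.functional.group_norm(x, 1, m.weight, m.bias, m.eps)
    assert torch.allclose(m(x), ref, atol=1e-5)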
|
DCMMC/chineseocr
|
Fp32GroupNorm
| false | 9,219 |
[
"MIT"
] | 0 |
0b8772615239ea7f212b1ab5bc75183e7e9f16b0
|
https://github.com/DCMMC/chineseocr/tree/0b8772615239ea7f212b1ab5bc75183e7e9f16b0
|
DiceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ih/cihrirlgsaahc6num6ehz5ea456s5cjbkexvoym66wqcebrm6xwe.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# dice => div
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-07), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-07), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1e-07
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class DiceLoss(nn.Module):
""" Loss function based on Dice-Sorensen Coefficient (L = 1 - Dice)
Input arguments:
soft : boolean, default = True
Select whether to use soft labelling or not. If true, dice calculated
directly on sigmoid output without converting to binary. If false,
sigmoid output converted to binary based on threshold value
smooth : float, default = 1e-7
Smoothing value to add to numerator and denominator of Dice. Low
value will prevent inf or nan occurrence.
threshold : float, default = 0.5
Threshold of sigmoid activation values to convert to binary.
Only applied if soft=False.
"""
def __init__(self, soft=True, threshold=0.5, eps=1e-07):
super().__init__()
self.eps = eps
self.soft = soft
self.threshold = threshold
def forward(self, inputs, targets):
if not self.soft:
            inputs = self.BinaryDice(inputs, self.threshold)
inputs = inputs.view(-1).float()
targets = targets.view(-1).float()
intersection = torch.sum(inputs * targets)
        dice = (2.0 * intersection + self.eps) / (torch.sum(inputs) +
            torch.sum(targets) + self.eps)
return 1 - dice
@staticmethod
def BinaryDice(image, threshold=0.5):
return (image > threshold).int()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
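# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Soft Dice loss lies in [0, 1]; identical prediction and target give ~0.
def _example_dice_loss():
    pred = torch.rand(4, 4, 4, 4)
    target = (torch.rand(4, 4, 4, 4) > 0.5).float()
    loss = DiceLoss()(pred, target)    # scalar tensor
    zero = DiceLoss()(target, target)  # ~0 for a perfect match
    assert 0.0 <= loss.item() <= 1.0 and zero.item() < 1e-5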
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1e-07
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
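# The persistent-reduction kernel above flattens both (4, 4, 4, 4) inputs to
# 256 elements and computes three sums in one program: the intersection
# sum(p * t) (tmp5), sum(p) (tmp8) and sum(t) (tmp11), then stores the scalar
# 1 - (2 * intersection + 1e-07) / (sum(p) + sum(t) + 1e-07) in-place.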
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
""" Loss function based on Dice-Sorensen Coefficient (L = 1 - Dice)
Input arguments:
soft : boolean, default = True
Select whether to use soft labelling or not. If true, dice calculated
directly on sigmoid output without converting to binary. If false,
sigmoid output converted to binary based on threshold value
smooth : float, default = 1e-7
Smoothing value to add to numerator and denominator of Dice. Low
value will prevent inf or nan occurrence.
threshold : float, default = 0.5
Threshold of sigmoid activation values to convert to binary.
Only applied if soft=False.
"""
def __init__(self, soft=True, threshold=0.5, eps=1e-07):
super().__init__()
self.eps = eps
self.soft = soft
self.threshold = threshold
@staticmethod
def BinaryDice(image, threshold=0.5):
return (image > threshold).int()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
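# --- Hedged parity sketch (assumes a CUDA device) ---
# Checks the fused scalar against the soft-Dice formula written out directly.
def _check_dice_parity():
    if not torch.cuda.is_available():
        return
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    eps = 1e-07
    ref = 1 - (2.0 * (a * b).sum() + eps) / (a.sum() + b.sum() + eps)
    assert torch.allclose(DiceLossNew()(a, b), ref, atol=1e-6)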
|
Jiongqi/RectAngle
|
DiceLoss
| false | 9,220 |
[
"MIT"
] | 0 |
558fa036d1b21b5ae0a556271ab674cd8ffe88b6
|
https://github.com/Jiongqi/RectAngle/tree/558fa036d1b21b5ae0a556271ab674cd8ffe88b6
|
MsgNorm
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/dv/cdvohkptuslet5o4k7ir3iwxhauwqpaue5iwojpfdtbzbkod67k6.py
# Topologically Sorted Source Nodes: [msg, x_norm, mul, msg_1], Original ATen: [aten.div, aten.linalg_vector_norm, aten.mul]
# Source node to ATen node mapping:
# msg => div
# msg_1 => mul_1
# mul => mul
# x_norm => pow_3, pow_4, sum_2
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg2_1), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (0))
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp16
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp15 * tmp27
tmp31 = tmp28 * tmp30
tl.store(in_out_ptr0 + (x3), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [msg, x_norm, mul, msg_1], Original ATen: [aten.div, aten.linalg_vector_norm, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_mul_0.run(buf1, arg0_1, arg1_1, arg2_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
class MsgNorm(torch.nn.Module):
def __init__(self, learn_msg_scale=False):
super(MsgNorm, self).__init__()
self.msg_scale = torch.nn.Parameter(torch.Tensor([1.0]),
requires_grad=learn_msg_scale)
def forward(self, x, msg, p=2):
msg = F.normalize(msg, p=p, dim=1)
x_norm = x.norm(p=p, dim=1, keepdim=True)
msg = msg * x_norm * self.msg_scale
return msg
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
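# --- Hedged usage sketch (illustrative, not part of the original module) ---
# The message keeps its direction but is rescaled, per position along dim=1,
# to the L2 norm of x times the (optionally learnable) scalar msg_scale.
def _example_msg_norm():
    x = torch.rand(4, 4, 4, 4)
    msg = torch.rand(4, 4, 4, 4)
    out = MsgNorm()(x, msg)
    ref = F.normalize(msg, p=2, dim=1) * x.norm(p=2, dim=1, keepdim=True)
    assert torch.allclose(out, ref)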
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + 0)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp16
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp15 * tmp27
tmp31 = tmp28 * tmp30
tl.store(in_out_ptr0 + x3, tmp31, xmask)
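# The kernel above hand-unrolls both channel reductions over the 4-wide
# dim=1: one input (in_ptr0) is divided by its L2 norm clamped at 1e-12 (the
# floor F.normalize applies), then multiplied by the unclamped channel norm
# of the other input (in_ptr1, matching x.norm) and by the broadcast
# msg_scale scalar loaded from in_ptr2.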
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_mul_0[grid(256)](buf1,
arg0_1, arg1_1, arg2_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MsgNormNew(torch.nn.Module):
def __init__(self, learn_msg_scale=False):
super(MsgNormNew, self).__init__()
self.msg_scale = torch.nn.Parameter(torch.Tensor([1.0]),
requires_grad=learn_msg_scale)
def forward(self, input_0, input_1):
arg2_1 = self.msg_scale
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
LMZimmer/nasbench301
|
MsgNorm
| false | 9,221 |
[
"Apache-2.0"
] | 0 |
3329d24a41765e87ac7ebf91fbf38269beeda822
|
https://github.com/LMZimmer/nasbench301/tree/3329d24a41765e87ac7ebf91fbf38269beeda822
|
Modified
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6i/c6id42uo2jsgvqzsfpwnacpw2a4v4w64c4hfd6faswaz2zimmy3p.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 92256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 6
x0 = xindex % 3844
x4 = (xindex // 3844)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/b2/cb2qkfwkqpot4o7ezkjwksmwj6ahaph4byg2fwsdlxoibfd2g6fi.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 23064
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = (xindex // 31) % 31
x4 = (xindex // 961)
x3 = (xindex // 5766)
x5 = xindex % 5766
tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + (5792*x3)), tmp6, xmask)
tl.store(out_ptr1 + (x5 + (5888*x3)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/x4/cx4jazumtgfxgxdrtkraz4w7d25nni26hhwy4jqomej63gqhlbuj.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 33640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 841) % 10
x2 = (xindex // 8410)
x4 = xindex % 8410
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + (8416*x2)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pk/cpkicjkcijwigucxudp4oqcqxfwtqrhykqkra6kses3xpirbhwaa.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 7840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = (xindex // 14) % 14
x2 = (xindex // 196) % 10
x3 = (xindex // 1960)
x4 = xindex % 1960
tmp0 = tl.load(in_ptr0 + ((2*x0) + (58*x1) + (841*x2) + (8416*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (58*x1) + (841*x2) + (8416*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (29 + (2*x0) + (58*x1) + (841*x2) + (8416*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (30 + (2*x0) + (58*x1) + (841*x2) + (8416*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (1984*x3)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (2048*x3)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/o5/co5tuubpblvyslf5cajrnplylzgocelreehz7rxrbelv6uwqiw2p.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 144) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rl/crldl6k4krx6wosdrnu53nyhr3pc2gd6ywaumr4irmp3mix7ghvu.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bj/cbj4beargeobmomoyoipvhddhzwjzfivpgji4ycwr6nudcfhi7tk.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5h/c5hankntmvfjralb52naz7bl7vcxdjctkm4wgldsgeap2fejfb4n.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
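# The eight kernels above fuse every elementwise/pooling step of the network:
# bias-add + ReLU after each of the three convolutions, the three 2x2
# max-pools (values plus int8 argmax offsets), and bias-add + ReLU after the
# first two linear layers. The convolutions and matmuls themselves are
# dispatched to extern_kernels in call() below.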
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (10, 6, 3, 3), (54, 9, 3, 1))
assert_size_stride(primals_5, (10, ), (1, ))
assert_size_stride(primals_6, (16, 10, 3, 3), (90, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (120, 144), (144, 1))
assert_size_stride(primals_9, (120, ), (1, ))
assert_size_stride(primals_10, (84, 120), (120, 1))
assert_size_stride(primals_11, (84, ), (1, ))
assert_size_stride(primals_12, (10, 84), (84, 1))
assert_size_stride(primals_13, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 62, 62), (23064, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 6, 62, 62), (23232, 3872, 62, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 92256, grid=grid(92256), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 31, 31), (5792, 961, 31, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 31, 31), (5888, 961, 31, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 23064, grid=grid(23064), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 29, 29), (8410, 841, 29, 1))
buf5 = empty_strided_cuda((4, 10, 29, 29), (8416, 841, 29, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 33640, grid=grid(33640), stream=stream0)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 10, 14, 14), (1984, 196, 14, 1), torch.float32)
buf7 = empty_strided_cuda((4, 10, 14, 14), (2048, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 7840, grid=grid(7840), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 12, 12), (2304, 144, 12, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 9216, grid=grid(9216), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.int8)
buf11 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 2304, grid=grid(2304), stream=stream0)
buf12 = empty_strided_cuda((16, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf11, (16, 144), (144, 1), 0), reinterpret_tensor(primals_8, (144, 120), (1, 144), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_9, 1920, grid=grid(1920), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((16, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (120, 84), (1, 120), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_7.run(buf15, primals_11, 1344, grid=grid(1344), stream=stream0)
del primals_11
buf16 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(primals_12, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf16)
del primals_13
return (buf16, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16, 144), (144, 1), 0), buf13, buf15, primals_12, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 6, 3, 3), (54, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((120, 144), (144, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.nn.functional as F
class Modified(nn.Module):
def __init__(self):
super(Modified, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 10, 3)
self.conv3 = nn.Conv2d(10, 16, 3)
self.fc1 = nn.Linear(16 * 3 * 3, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(-1, 16 * 3 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
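# --- Hedged usage note (illustrative, not part of the original module) ---
# For 64x64 inputs the tensor reaching the flatten step is (N, 16, 6, 6),
# i.e. 576 features per sample, while view(-1, 16 * 3 * 3) produces 144-wide
# rows; a batch of 4 therefore flattens to 16 rows, matching the (16, 144)
# and (16, 10) shapes seen in the compiled wrapper.
def _example_modified():
    logits = Modified()(torch.rand(4, 3, 64, 64))
    assert logits.shape == (16, 10)  # batch folded from 4 to 16 by the view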
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 92256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 6
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 23064
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = xindex // 31 % 31
x4 = xindex // 961
x3 = xindex // 5766
x5 = xindex % 5766
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 5792 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 5888 * x3), tmp16, xmask)
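# [Editor's note - hedged reference] The kernel above is a hand-rolled 2x2 /
# stride-2 max pool: tmp6 is the window max, and tmp16 records which of the four
# window slots won as an int8 in 0..3 for the backward pass. The eager counterpart,
# which instead returns PyTorch's own flat index convention, is:
def _reference_maxpool2x2_with_indices(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)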
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 33640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 10
x2 = xindex // 8410
x4 = xindex % 8410
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 8416 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 7840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196 % 10
x3 = xindex // 1960
x4 = xindex % 1960
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2 + 8416 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2 + 8416 * x3),
xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2 + 8416 * x3),
xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2 + 8416 * x3),
xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1984 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 2048 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 144 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 24 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 24 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + 2 * x0 + 24 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + 2 * x0 + 24 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (10, 6, 3, 3), (54, 9, 3, 1))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (16, 10, 3, 3), (90, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (120, 144), (144, 1))
assert_size_stride(primals_9, (120,), (1,))
assert_size_stride(primals_10, (84, 120), (120, 1))
assert_size_stride(primals_11, (84,), (1,))
assert_size_stride(primals_12, (10, 84), (84, 1))
assert_size_stride(primals_13, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 62, 62), (23064, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 6, 62, 62), (23232, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(92256)](buf0, primals_2,
buf1, 92256, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 31, 31), (5792, 961, 31, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 31, 31), (5888, 961, 31, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(23064)](buf1, buf2,
buf3, 23064, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 29, 29), (8410, 841, 29, 1))
buf5 = empty_strided_cuda((4, 10, 29, 29), (8416, 841, 29, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(33640)](buf4, primals_5,
buf5, 33640, XBLOCK=512, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 10, 14, 14), (1984, 196, 14, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 10, 14, 14), (2048, 196, 14, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(7840)](buf5, buf6,
buf7, 7840, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 12, 12), (2304, 144, 12, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(9216)](buf9, primals_7,
9216, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.int8)
buf11 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_5[grid(2304)](buf9, buf10,
buf11, 2304, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((16, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 144), (144, 1), 0),
reinterpret_tensor(primals_8, (144, 120), (1, 144), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(1920)](buf13, primals_9, 1920, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((16, 84), (84, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (120, 84),
(1, 120), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_relu_7[grid(1344)](buf15, primals_11, 1344, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_11
buf16 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(
primals_12, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf16)
del primals_13
return (buf16, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16,
144), (144, 1), 0), buf13, buf15, primals_12, primals_10, primals_8)
class ModifiedNew(nn.Module):
def __init__(self):
super(ModifiedNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 10, 3)
self.conv3 = nn.Conv2d(10, 16, 3)
self.fc1 = nn.Linear(16 * 3 * 3, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_12 = self.fc3.weight
primals_13 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
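# [Editor's note - hedged usage sketch, not part of the generated module]
def _smoke_test_modified_new():
    # Guarded so the file still imports on CPU-only hosts.
    if not torch.cuda.is_available():
        return
    model = ModifiedNew().cuda()
    out = model(torch.rand(4, 3, 64, 64, device='cuda'))
    # (16, 10), not (4, 10): view(-1, 16 * 3 * 3) folds the (4, 16, 6, 6)
    # activation into 16 rows of 144 features.
    assert out.shape == (16, 10)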
|
Karin-S/USYD-ELEC5307
|
Modified
| false | 9,222 |
[
"Apache-2.0"
] | 0 |
83cb40adf0c15ee703a880fc7aba5c69b82a5434
|
https://github.com/Karin-S/USYD-ELEC5307/tree/83cb40adf0c15ee703a880fc7aba5c69b82a5434
|
BartClassificationHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_2 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
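# [Editor's note - hedged gloss] The kernel above fuses the dense layer's bias add
# with tanh, updating the mm output in place: each flat element x2 becomes
# tanh(x + bias[x2 % 4]). In eager PyTorch this is simply torch.tanh(x + bias).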
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
from torch import nn
class BartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, input_dim, inner_dim, num_classes, pooler_dropout):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, x):
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'inner_dim': 4, 'num_classes': 4,
'pooler_dropout': 0.5}]
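# [Editor's note - hedged observation] The compiled listing below contains no
# dropout kernels - just mm -> fused bias+tanh -> addmm - which is consistent with
# the module having been traced with nn.Dropout inactive (eval mode), where it
# reduces to the identity.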
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4
class BartClassificationHeadNew(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, input_dim, inner_dim, num_classes, pooler_dropout):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
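# [Editor's note - hedged usage sketch, not part of the generated module]
def _smoke_test_bart_head():
    if not torch.cuda.is_available():
        return
    head = BartClassificationHeadNew(input_dim=4, inner_dim=4, num_classes=4,
                                     pooler_dropout=0.5).cuda()
    out = head(torch.rand(4, 4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4, 4)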
|
JuruoMP/gap-exp
|
BartClassificationHead
| false | 9,223 |
[
"Apache-2.0"
] | 0 |
2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05
|
https://github.com/JuruoMP/gap-exp/tree/2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05
|
Discriminator
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/iu/ciuohqr5pqwxxdqpgaszyr2gmkyh4webkx47a5whlcxapcaqrmpd.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 6
y1 = (yindex // 6)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (6*x2) + (150*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mo/cmo7kprbhn5foykjllxlvsdu5fash7avpwgsvrlgsnxi6osh7vsj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 24
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 6
y1 = (yindex // 6)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (6*x2) + (24576*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/zd/czdmxdjo6vbpeyu4gsmsvm5mqdlzdrlububsx2rsj2j5l2h4dsrz.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (16*x2) + (400*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5h/c5hkuqsyg6b4mbsdxblt4xagz75onxk3mtokac7gyjwh7ljruai5.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/gi/cgimqxxs6qckr3romidticzvnpearx2sdhoesal5xonmke5jjfzs.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
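# [Editor's note - hedged gloss] triton_poi_fused_0 through triton_poi_fused_4
# perform no arithmetic: they repack the 5x5 convolution weights (and, in
# triton_poi_fused_1, the 64x64 input) from contiguous (out, in, kh, kw) / NCHW
# order into the channels-last-style layouts the fused convolution kernels below
# expect.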
# kernel path: runs/run_shard_8/inductor_cache/7p/c7pmnbahcm3yi2qyuxtlqhxjvvjyzmylknsgacn2efuf4vcdsb3o.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_per_fused__weight_norm_interface_5 = async_compile.triton('triton_per_fused__weight_norm_interface_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_5(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 150
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (150*x0)), rmask & xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
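# [Editor's note - hedged reference] Weight normalization reparameterizes a weight
# as w = g * v / ||v||, with the norm taken per output channel over dims (1, 2, 3);
# the kernel above computes the ||v|| factor (here 16 channels of 6*5*5 = 150
# elements each). An eager sketch of the full reparameterization:
def _reference_weight_norm(v, g):
    norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()
    return g * v / norm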
# kernel path: runs/run_shard_8/inductor_cache/ah/cahfaxtna75ni4ziltb65ap3fcjvdtspp55d36wuo54p7phmd4d7.py
# Topologically Sorted Source Nodes: [_weight_norm, x], Original ATen: [aten._weight_norm_interface, aten.convolution]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# x => convolution
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused__weight_norm_interface_convolution_6 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 6
y1 = (yindex // 6)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (6*x2) + (150*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y1), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y1), ymask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + (6*x2) + (150*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/j4/cj4nml7l3vz2a35zvcdcxuvsdtqjohvfzpywagxxromilqxzhbpz.py
# Topologically Sorted Source Nodes: [x, sub, relu, x_1], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# sub => sub
# x => convolution
# x_1 => add
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %primals_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_5), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_sub_threshold_backward_7 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + (x2), tmp8, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
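# [Editor's note - hedged reference] The kernel above implements a "shifted" ReLU:
# after the conv bias add it computes relu(x - s) + s, where s (primals_5,
# broadcast from a single element) clamps activations from below at s instead of
# at 0. It also emits the (relu <= 0) mask consumed by threshold_backward.
def _reference_shifted_relu(x, shift):
    return torch.relu(x - shift) + shift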
# kernel path: runs/run_shard_8/inductor_cache/le/cle6rmavp35boyijii2h4xuy6bdwo7zsyw5rt263pugye2575zk7.py
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_1 => pow_3, pow_4, sum_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_7, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2, 3], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
triton_per_fused__weight_norm_interface_8 = async_compile.triton('triton_per_fused__weight_norm_interface_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_8(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 32
XBLOCK: tl.constexpr = 1
rnumel = 400
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (400*x0)), rmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/jy/cjyd2bfnjahrfv64m5cnh3lmmfssee6lcgjle7w3my7ijmbkorga.py
# Topologically Sorted Source Nodes: [_weight_norm_1, x_2], Original ATen: [aten._weight_norm_interface, aten.convolution]
# Source node to ATen node mapping:
# _weight_norm_1 => div_1, mul_1
# x_2 => convolution_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_6, %pow_4), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %div_1), kwargs = {})
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %mul_1, %primals_8, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused__weight_norm_interface_convolution_9 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (400*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y1), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y1), ymask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + (16*x2) + (400*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hf/chf2oujwjqo77d2oewwcjflyfxfwpqvtvewb45opuxhypalaj4hi.py
# Topologically Sorted Source Nodes: [x_2, sub_1, relu_1, x_3], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_1 => relu_1
# sub_1 => sub_1
# x_2 => convolution_1
# x_3 => add_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %mul_1, %primals_8, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_1, %primals_9), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_1, %primals_9), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_sub_threshold_backward_10 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + (x2), tmp8, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pi/cpiwvvp45ekwuhrxsyobb3n5ljdhjaagu4euvuzvumipx34ehfq4.py
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_2 => pow_5, pow_6, sum_3
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_11, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1, 2, 3], True), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
triton_per_fused__weight_norm_interface_11 = async_compile.triton('triton_per_fused__weight_norm_interface_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_11(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 64
XBLOCK: tl.constexpr = 1
rnumel = 800
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (800*x0)), rmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/gs/cgsknv5coj4ejltnr3jcylmxgk6jlqfs4afhc7njvwvkix2alhpl.py
# Topologically Sorted Source Nodes: [_weight_norm_2, x_4], Original ATen: [aten._weight_norm_interface, aten.convolution]
# Source node to ATen node mapping:
# _weight_norm_2 => div_2, mul_2
# x_4 => convolution_2
# Graph fragment:
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_10, %pow_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {})
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %mul_2, %primals_12, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused__weight_norm_interface_convolution_12 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 32
y1 = (yindex // 32)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (32*x2) + (800*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y1), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y1), None, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (32*x2) + (800*y1)), tmp4, xmask)
''', device_str='cuda')
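# Editor's note (added): the normalize kernel above writes the scaled weight
# g / ||v|| * v twice: out_ptr0 holds the contiguous (x2 + 25*y3) layout that
# call() returns as buf19, while out_ptr1 holds the channels-last layout that
# is actually fed to extern_kernels.convolution as buf20.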
# kernel path: runs/run_shard_8/inductor_cache/6z/c6zwf7iqh7p7uhqktvfopwmrrh7valfg7nvxllhwstvwc5uzjf6w.py
# Topologically Sorted Source Nodes: [x_4, sub_2, relu_2, x_5], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_2 => relu_2
# sub_2 => sub_2
# x_4 => convolution_2
# x_5 => add_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %mul_2, %primals_12, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_2, %primals_13), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_2, %primals_13), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_sub_threshold_backward_13 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + (x2), tmp8, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
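# Editor's note (added): eager-mode reference for the fused TReLU stage above.
# A hedged sketch only; `conv_out`, `bias`, and `alpha` stand for the raw
# convolution output, its per-channel bias, and the scalar TReLU threshold.
def _reference_trelu_13(conv_out, bias, alpha):
    import torch.nn.functional as F
    y = conv_out + bias.view(1, -1, 1, 1)  # bias add folded out of the conv
    r = F.relu(y - alpha)                  # sub -> relu
    act = r + alpha                        # add: TReLU output
    mask = r <= 0                          # threshold_backward mask (aten.le)
    return act, mask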
# kernel path: runs/run_shard_8/inductor_cache/3k/c3kgdgezw74wchseqbghu26t2cbtgdk7bhf7d5o6twleljcuh3si.py
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_3 => pow_7, pow_8, sum_4
# Graph fragment:
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_15, 2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1, 2, 3], True), kwargs = {})
# %pow_8 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {})
triton_red_fused__weight_norm_interface_14 = async_compile.triton('triton_red_fused__weight_norm_interface_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__weight_norm_interface_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__weight_norm_interface_14(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (1600*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = libdevice.sqrt(tmp3)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
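# Editor's note (added): unlike the persistent ("per") reductions above, this
# is a looped ("red") reduction -- with rnumel = 1600 the row no longer fits
# Inductor's persistent-reduction budget, so partial sums of squares are
# accumulated over RBLOCK-sized chunks before the final sqrt.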
# kernel path: runs/run_shard_8/inductor_cache/32/c32t4jsszdt33uczvurnkh5wbiplpdpqacvw7mhtf6n5kdxl54zb.py
# Topologically Sorted Source Nodes: [_weight_norm_3, x_6], Original ATen: [aten._weight_norm_interface, aten.convolution]
# Source node to ATen node mapping:
# _weight_norm_3 => div_3, mul_3
# x_6 => convolution_3
# Graph fragment:
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_14, %pow_8), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_15, %div_3), kwargs = {})
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_2, %mul_3, %primals_16, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused__weight_norm_interface_convolution_15 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 64
y1 = (yindex // 64)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (64*x2) + (1600*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y1), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y1), None, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (64*x2) + (1600*y1)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7r/c7rdxixjosbh7ymn3x5j2sl25x6yqhleclggjp63z2jy7jyjuqvn.py
# Topologically Sorted Source Nodes: [x_6, sub_3, relu_3, x_7], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_3 => relu_3
# sub_3 => sub_3
# x_6 => convolution_3
# x_7 => add_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_2, %mul_3, %primals_16, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_3, %primals_17), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_3,), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_3, %primals_17), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_sub_threshold_backward_16 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + (x2), tmp8, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/h2/ch2muopo5hd67wmawis4b2yeteunbgvli4oplje4f6sgnddstzxr.py
# Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_4 => div_4, mul_4, pow_10, pow_9, sum_5
# Graph fragment:
# %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_19, 2), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_9, [1, 2, 3], True), kwargs = {})
# %pow_10 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_18, %pow_10), kwargs = {})
# %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_19, %div_4), kwargs = {})
triton_per_fused__weight_norm_interface_17 = async_compile.triton('triton_per_fused__weight_norm_interface_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_17(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None)
''', device_str='cuda')
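# Editor's note (added): with a single output channel (xnumel = 1) this kernel
# can fuse the whole weight norm in one pass: it reduces all 128 elements of
# weight_v, then emits both the norm and the normalized weight. Hedged eager
# sketch; `g` and `v` stand for primals_18 and primals_19.
def _reference_weight_norm_17(g, v):
    norm = v.pow(2).sum().sqrt()   # scalar ||v||
    return norm, v * (g / norm)    # pow/sum/sqrt, then div + mul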
# kernel path: runs/run_shard_8/inductor_cache/xg/cxgirr7ywaqvjxpseg4bhe4k4fyif3g5cipvyiq4q2imyyntrosj.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_8 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_3, %mul_4, %primals_20, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_18 = async_compile.triton('triton_poi_fused_convolution_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args
args.clear()
assert_size_stride(primals_1, (16, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 6, 64, 64), (24576, 4096, 64, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (32, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_7, (32, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_8, (32, ), (1, ))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (64, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_11, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_12, (64, ), (1, ))
assert_size_stride(primals_13, (1, ), (1, ))
assert_size_stride(primals_14, (128, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_16, (128, ), (1, ))
assert_size_stride(primals_17, (1, ), (1, ))
assert_size_stride(primals_18, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_19, (1, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_20, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_2, buf0, 96, 25, grid=grid(96, 25), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 6, 64, 64), (24576, 1, 384, 6), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 24, 4096, grid=grid(24, 4096), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_7, buf2, 512, 25, grid=grid(512, 25), stream=stream0)
del primals_7
buf3 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_11, buf3, 2048, 25, grid=grid(2048, 25), stream=stream0)
del primals_11
buf4 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_15, buf4, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_15
buf5 = empty_strided_cuda((16, 1, 1, 1), (1, 16, 16, 16), torch.float32)
buf6 = reinterpret_tensor(buf5, (16, 1, 1, 1), (1, 1, 1, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_5.run(buf6, buf0, 16, 150, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((16, 6, 5, 5), (150, 25, 5, 1), torch.float32)
buf8 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm, x], Original ATen: [aten._weight_norm_interface, aten.convolution]
triton_poi_fused__weight_norm_interface_convolution_6.run(buf0, primals_1, buf6, buf7, buf8, 96, 25, grid=grid(96, 25), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf1, buf8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 16, 32, 32), (16384, 1, 512, 16))
del buf8
buf10 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.float32)
buf37 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.bool)
# Topologically Sorted Source Nodes: [x, sub, relu, x_1], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_sub_threshold_backward_7.run(buf9, primals_3, primals_5, buf10, buf37, 65536, grid=grid(65536), stream=stream0)
del buf9
del primals_3
del primals_5
buf11 = empty_strided_cuda((32, 1, 1, 1), (1, 32, 32, 32), torch.float32)
buf12 = reinterpret_tensor(buf11, (32, 1, 1, 1), (1, 1, 1, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_8.run(buf12, buf2, 32, 400, grid=grid(32), stream=stream0)
buf13 = empty_strided_cuda((32, 16, 5, 5), (400, 25, 5, 1), torch.float32)
buf14 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1, x_2], Original ATen: [aten._weight_norm_interface, aten.convolution]
triton_poi_fused__weight_norm_interface_convolution_9.run(buf2, primals_6, buf12, buf13, buf14, 512, 25, grid=grid(512, 25), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf10, buf14, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 32, 16, 16), (8192, 1, 512, 32))
del buf14
buf16 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.float32)
buf36 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.bool)
# Topologically Sorted Source Nodes: [x_2, sub_1, relu_1, x_3], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_sub_threshold_backward_10.run(buf15, primals_8, primals_9, buf16, buf36, 32768, grid=grid(32768), stream=stream0)
del buf15
del primals_8
del primals_9
buf17 = empty_strided_cuda((64, 1, 1, 1), (1, 64, 64, 64), torch.float32)
buf18 = reinterpret_tensor(buf17, (64, 1, 1, 1), (1, 1, 1, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_11.run(buf18, buf3, 64, 800, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((64, 32, 5, 5), (800, 25, 5, 1), torch.float32)
buf20 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2, x_4], Original ATen: [aten._weight_norm_interface, aten.convolution]
triton_poi_fused__weight_norm_interface_convolution_12.run(buf3, primals_10, buf18, buf19, buf20, 2048, 25, grid=grid(2048, 25), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf16, buf20, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 8, 8), (4096, 1, 512, 64))
del buf20
buf22 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.float32)
buf35 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.bool)
# Topologically Sorted Source Nodes: [x_4, sub_2, relu_2, x_5], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_sub_threshold_backward_13.run(buf21, primals_12, primals_13, buf22, buf35, 16384, grid=grid(16384), stream=stream0)
del buf21
del primals_12
del primals_13
buf23 = empty_strided_cuda((128, 1, 1, 1), (1, 128, 128, 128), torch.float32)
buf24 = reinterpret_tensor(buf23, (128, 1, 1, 1), (1, 1, 1, 1), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_red_fused__weight_norm_interface_14.run(buf24, buf4, 128, 1600, grid=grid(128), stream=stream0)
buf25 = empty_strided_cuda((128, 64, 5, 5), (1600, 25, 5, 1), torch.float32)
buf26 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3, x_6], Original ATen: [aten._weight_norm_interface, aten.convolution]
triton_poi_fused__weight_norm_interface_convolution_15.run(buf4, primals_14, buf24, buf25, buf26, 8192, 25, grid=grid(8192, 25), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf22, buf26, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 4, 4), (2048, 1, 512, 128))
del buf26
buf28 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
buf34 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.bool)
# Topologically Sorted Source Nodes: [x_6, sub_3, relu_3, x_7], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_sub_threshold_backward_16.run(buf27, primals_16, primals_17, buf28, buf34, 8192, grid=grid(8192), stream=stream0)
del buf27
del primals_16
del primals_17
buf29 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32)
buf30 = buf29; del buf29 # reuse
buf31 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_17.run(buf30, primals_19, primals_18, buf31, 1, 128, grid=grid(1), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf28, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1, 4, 4), (16, 1, 4, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_18.run(buf33, primals_20, 64, grid=grid(64), stream=stream0)
del primals_20
return (reinterpret_tensor(buf33, (1, 64), (64, 1), 0), buf7, buf13, buf19, buf25, buf31, primals_1, buf0, buf1, primals_6, buf2, primals_10, buf3, primals_14, buf4, primals_18, primals_19, buf6, buf7, buf10, buf12, buf13, buf16, buf18, buf19, buf22, buf24, buf25, buf28, buf30, buf31, buf34, buf35, buf36, buf37, )
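# Editor's note (added): the first returned tensor is the discriminator score,
# the (4, 1, 4, 4) conv output reinterpreted as (1, 64) to match x.view(-1, 64);
# the remaining buffers appear to be the weights, activations, norms, and ReLU
# masks (buf34-buf37) saved for the backward pass.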
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 6, 64, 64), (24576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((1, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
class TReLU(nn.Module):
def __init__(self):
super(TReLU, self).__init__()
self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.alpha.data.fill_(0)
def forward(self, x):
x = F.relu(x - self.alpha) + self.alpha
return x
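# Editor's note (added): because alpha is a scalar, TReLU is a learnable lower
# clamp: F.relu(x - alpha) + alpha == torch.maximum(x, alpha) elementwise.
# E.g. with alpha = 0.5, [-1.0, 0.2, 0.9] maps to [0.5, 0.5, 0.9].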
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.conv0 = weightNorm(nn.Conv2d(6, 16, 5, 2, 2))
self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2))
self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2))
self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2))
self.conv4 = weightNorm(nn.Conv2d(128, 1, 1, 1, 0))
self.relu0 = TReLU()
self.relu1 = TReLU()
self.relu2 = TReLU()
self.relu3 = TReLU()
def forward(self, x):
x = self.conv0(x)
x = self.relu0(x)
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.relu3(x)
x = self.conv4(x)
x = x.view(-1, 64)
return x
def get_inputs():
return [torch.rand([4, 6, 64, 64])]
def get_init_inputs():
return [[], {}]
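# Editor's note (added): minimal usage sketch for the reference module above
# (hedged; not part of the original repo code).
def _demo_discriminator():
    d = Discriminator()
    out = d(*get_inputs())  # input (4, 6, 64, 64)
    return out.shape        # torch.Size([1, 64]): (4, 1, 4, 4) viewed as (-1, 64)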
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 6
y1 = yindex // 6
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 6 * x2 + 150 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 24
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 6
y1 = yindex // 6
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 6 * x2 + 24576 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 400 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_5(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 150
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 150 * x0), rmask & xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_6(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 6
y1 = yindex // 6
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 6 * x2 + 150 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 6 * x2 + 150 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_7(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + x2, tmp8, None)
tl.store(out_ptr1 + x2, tmp10, None)
@triton.jit
def triton_per_fused__weight_norm_interface_8(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 400
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 400 * x0), rmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, None)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_9(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 400 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 16 * x2 + 400 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_10(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + x2, tmp8, None)
tl.store(out_ptr1 + x2, tmp10, None)
@triton.jit
def triton_per_fused__weight_norm_interface_11(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 800
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 800 * x0), rmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, None)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_12(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 32
y1 = yindex // 32
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 32 * x2 + 800 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 32 * x2 + 800 * y1), tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_13(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + x2, tmp8, None)
tl.store(out_ptr1 + x2, tmp10, None)
@triton.jit
def triton_red_fused__weight_norm_interface_14(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 1600 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = libdevice.sqrt(tmp3)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_convolution_15(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 64
y1 = yindex // 64
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 1600 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 64 * x2 + 1600 * y1), tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_sub_threshold_backward_16(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tmp7 + tmp4
tmp9 = 0.0
tmp10 = tmp7 <= tmp9
tl.store(out_ptr0 + x2, tmp8, None)
tl.store(out_ptr1 + x2, tmp10, None)
@triton.jit
def triton_per_fused__weight_norm_interface_17(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
@triton.jit
def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
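# Editor's note (added): kernel map for the call() below -- triton_poi_fused_0
# through _4 re-lay the weights and input channels-last; the
# *_weight_norm_interface_* kernels compute g / ||v|| * v for each conv; the
# *_add_convolution_relu_sub_* kernels are the fused TReLU stages (plus their
# backward masks); and triton_poi_fused_convolution_18 adds the final 1x1
# conv bias in place.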
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 6, 64, 64), (24576, 4096, 64, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (32, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_7, (32, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_8, (32,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (64, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_11, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_12, (64,), (1,))
assert_size_stride(primals_13, (1,), (1,))
assert_size_stride(primals_14, (128, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_16, (128,), (1,))
assert_size_stride(primals_17, (1,), (1,))
assert_size_stride(primals_18, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_19, (1, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_20, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 25)](primals_2, buf0, 96, 25, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 6, 64, 64), (24576, 1, 384, 6), torch
.float32)
triton_poi_fused_1[grid(24, 4096)](primals_4, buf1, 24, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch.
float32)
triton_poi_fused_2[grid(512, 25)](primals_7, buf2, 512, 25, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_7
buf3 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.
float32)
triton_poi_fused_3[grid(2048, 25)](primals_11, buf3, 2048, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_11
buf4 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_4[grid(8192, 25)](primals_15, buf4, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_15
buf5 = empty_strided_cuda((16, 1, 1, 1), (1, 16, 16, 16), torch.float32
)
buf6 = reinterpret_tensor(buf5, (16, 1, 1, 1), (1, 1, 1, 1), 0)
del buf5
triton_per_fused__weight_norm_interface_5[grid(16)](buf6, buf0, 16,
150, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((16, 6, 5, 5), (150, 25, 5, 1), torch.float32
)
buf8 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32
)
triton_poi_fused__weight_norm_interface_convolution_6[grid(96, 25)](
buf0, primals_1, buf6, buf7, buf8, 96, 25, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf1, buf8, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 16, 32, 32), (16384, 1, 512, 16))
del buf8
buf10 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.float32)
buf37 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.bool)
triton_poi_fused_add_convolution_relu_sub_threshold_backward_7[grid
(65536)](buf9, primals_3, primals_5, buf10, buf37, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del buf9
del primals_3
del primals_5
buf11 = empty_strided_cuda((32, 1, 1, 1), (1, 32, 32, 32), torch.
float32)
buf12 = reinterpret_tensor(buf11, (32, 1, 1, 1), (1, 1, 1, 1), 0)
del buf11
triton_per_fused__weight_norm_interface_8[grid(32)](buf12, buf2, 32,
400, num_warps=4, num_stages=1)
buf13 = empty_strided_cuda((32, 16, 5, 5), (400, 25, 5, 1), torch.
float32)
buf14 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch.
float32)
triton_poi_fused__weight_norm_interface_convolution_9[grid(512, 25)](
buf2, primals_6, buf12, buf13, buf14, 512, 25, XBLOCK=1, YBLOCK
=256, num_warps=4, num_stages=1)
buf15 = extern_kernels.convolution(buf10, buf14, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 32, 16, 16), (8192, 1, 512, 32))
del buf14
buf16 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.float32)
buf36 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.bool)
triton_poi_fused_add_convolution_relu_sub_threshold_backward_10[grid
(32768)](buf15, primals_8, primals_9, buf16, buf36, 32768,
XBLOCK=256, num_warps=4, num_stages=1)
del buf15
del primals_8
del primals_9
buf17 = empty_strided_cuda((64, 1, 1, 1), (1, 64, 64, 64), torch.
float32)
buf18 = reinterpret_tensor(buf17, (64, 1, 1, 1), (1, 1, 1, 1), 0)
del buf17
triton_per_fused__weight_norm_interface_11[grid(64)](buf18, buf3,
64, 800, num_warps=8, num_stages=1)
buf19 = empty_strided_cuda((64, 32, 5, 5), (800, 25, 5, 1), torch.
float32)
buf20 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch
.float32)
triton_poi_fused__weight_norm_interface_convolution_12[grid(2048, 25)](
buf3, primals_10, buf18, buf19, buf20, 2048, 25, XBLOCK=32,
YBLOCK=16, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf16, buf20, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 8, 8), (4096, 1, 512, 64))
del buf20
buf22 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch
.float32)
buf35 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch
.bool)
triton_poi_fused_add_convolution_relu_sub_threshold_backward_13[grid
(16384)](buf21, primals_12, primals_13, buf22, buf35, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del buf21
del primals_12
del primals_13
buf23 = empty_strided_cuda((128, 1, 1, 1), (1, 128, 128, 128),
torch.float32)
buf24 = reinterpret_tensor(buf23, (128, 1, 1, 1), (1, 1, 1, 1), 0)
del buf23
triton_red_fused__weight_norm_interface_14[grid(128)](buf24, buf4,
128, 1600, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
buf25 = empty_strided_cuda((128, 64, 5, 5), (1600, 25, 5, 1), torch
.float32)
buf26 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused__weight_norm_interface_convolution_15[grid(8192, 25)](
buf4, primals_14, buf24, buf25, buf26, 8192, 25, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
buf27 = extern_kernels.convolution(buf22, buf26, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 4, 4), (2048, 1, 512, 128))
del buf26
buf28 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
buf34 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.bool)
triton_poi_fused_add_convolution_relu_sub_threshold_backward_16[grid
(8192)](buf27, primals_16, primals_17, buf28, buf34, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del buf27
del primals_16
del primals_17
buf29 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32)
buf30 = buf29
del buf29
buf31 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
triton_per_fused__weight_norm_interface_17[grid(1)](buf30,
primals_19, primals_18, buf31, 1, 128, XBLOCK=1, num_warps=2,
num_stages=1)
buf32 = extern_kernels.convolution(buf28, buf31, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1, 4, 4), (16, 1, 4, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_18[grid(64)](buf33, primals_20, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_20
return (reinterpret_tensor(buf33, (1, 64), (64, 1), 0), buf7, buf13,
buf19, buf25, buf31, primals_1, buf0, buf1, primals_6, buf2,
primals_10, buf3, primals_14, buf4, primals_18, primals_19, buf6,
buf7, buf10, buf12, buf13, buf16, buf18, buf19, buf22, buf24, buf25,
buf28, buf30, buf31, buf34, buf35, buf36, buf37)
class TReLU(nn.Module):
def __init__(self):
super(TReLU, self).__init__()
self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.alpha.data.fill_(0)
def forward(self, x):
x = F.relu(x - self.alpha) + self.alpha
return x
class DiscriminatorNew(nn.Module):
def __init__(self):
super(DiscriminatorNew, self).__init__()
self.conv0 = weightNorm(nn.Conv2d(6, 16, 5, 2, 2))
self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2))
self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2))
self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2))
self.conv4 = weightNorm(nn.Conv2d(128, 1, 1, 1, 0))
self.relu0 = TReLU()
self.relu1 = TReLU()
self.relu2 = TReLU()
self.relu3 = TReLU()
def forward(self, input_0):
primals_3 = self.conv0.bias
primals_1 = self.conv0.weight_g
primals_2 = self.conv0.weight_v
primals_8 = self.conv1.bias
primals_6 = self.conv1.weight_g
primals_7 = self.conv1.weight_v
primals_12 = self.conv2.bias
primals_10 = self.conv2.weight_g
primals_11 = self.conv2.weight_v
primals_16 = self.conv3.bias
primals_14 = self.conv3.weight_g
primals_15 = self.conv3.weight_v
primals_5 = self.conv4.bias
primals_18 = self.conv4.weight_g
primals_19 = self.conv4.weight_v
primals_9 = self.relu0.alpha
primals_13 = self.relu1.alpha
primals_17 = self.relu2.alpha
primals_20 = self.relu3.alpha
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0]
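# Editor's note (added): hedged smoke-test sketch for the compiled module --
# requires CUDA, since every kernel above is compiled for device 'cuda'.
def _demo_discriminator_new():
    m = DiscriminatorNew().cuda()
    x = torch.rand(4, 6, 64, 64, device='cuda')
    with torch.no_grad():
        y = m(x)
    return y.shape  # expected torch.Size([1, 64])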
|
HenryOsborne/LearningToPaint
|
Discriminator
| false | 9224 |
[
"MIT"
] | 0 |
d8fdf41c8d193b91c78f73b7a092897e846e19eb
|
https://github.com/HenryOsborne/LearningToPaint/tree/d8fdf41c8d193b91c78f73b7a092897e846e19eb
|
Baseline
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6i/c6id42uo2jsgvqzsfpwnacpw2a4v4w64c4hfd6faswaz2zimmy3p.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 92256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 6
x0 = xindex % 3844
x4 = (xindex // 3844)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask)
''', device_str='cuda')
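# Hedged eager-mode equivalent of triton_poi_fused_convolution_relu_0
# (illustration only): the convolution itself runs through
# extern_kernels.convolution with bias=None, and this kernel fuses the
# per-channel bias add with the ReLU (a max against 0) while writing into a
# channel-padded layout (stride 3872 rather than the dense 3844); the
# padding is a compiler alignment choice not reproduced here.
def _conv_bias_relu_reference(conv_out, bias):
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))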
# kernel path: runs/run_shard_8/inductor_cache/b2/cb2qkfwkqpot4o7ezkjwksmwj6ahaph4byg2fwsdlxoibfd2g6fi.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 23064
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = (xindex // 31) % 31
x4 = (xindex // 961)
x3 = (xindex // 5766)
x5 = xindex % 5766
tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + (5792*x3)), tmp6, xmask)
tl.store(out_ptr1 + (x5 + (5888*x3)), tmp16, xmask)
''', device_str='cuda')
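# Hedged reference for the max-pool kernels in this file (illustration only):
# each 2x2, stride-2 window emits its maximum plus an int8 offset in
# {0, 1, 2, 3} naming which window position (row-major: top-left, top-right,
# bottom-left, bottom-right) held the running maximum; the offsets feed the
# pooling backward pass. A sketch recovering the same encoding from
# PyTorch's flat return_indices (tie-breaking may differ on exact ties):
def _maxpool2x2_with_offsets_reference(x):
    vals, idx = torch.nn.functional.max_pool2d(x, 2, 2, return_indices=True)
    w = x.shape[-1]
    # idx is a flat h * w + w_col index within each spatial plane; the window
    # origin is always even, so parity gives the in-window position.
    offsets = (idx % w) % 2 + 2 * ((idx // w) % 2)
    return vals, offsets.to(torch.int8)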
# kernel path: runs/run_shard_8/inductor_cache/hs/chstjvfjccblo46oj4nldltidzdc4d625ujd52x6jyeg24yfsfcg.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 53824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 841) % 16
x2 = (xindex // 13456)
x4 = xindex % 13456
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + (13472*x2)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/iy/ciyt7m5nfi2iqdudeevbj2cuimox6jm3xgtf3rfrdvhklukwahrl.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = (xindex // 14) % 14
x2 = (xindex // 196) % 16
x3 = (xindex // 3136)
x4 = xindex
x5 = xindex % 3136
tmp0 = tl.load(in_ptr0 + ((2*x0) + (58*x1) + (841*x2) + (13472*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (58*x1) + (841*x2) + (13472*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (29 + (2*x0) + (58*x1) + (841*x2) + (13472*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (30 + (2*x0) + (58*x1) + (841*x2) + (13472*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4), tmp6, xmask)
tl.store(out_ptr1 + (x5 + (3200*x3)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/wu/cwuer4fgxhooaorb25v55zuicxdk7qofrn3dzwiuc2lyd54nu53w.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 144) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/s4/cs4xzmrqtc3jxx2by2tun3mothqckaheozfsdsxc72r5yisfb2lb.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bj/cbj4beargeobmomoyoipvhddhzwjzfivpgji4ycwr6nudcfhi7tk.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5h/c5hankntmvfjralb52naz7bl7vcxdjctkm4wgldsgeap2fejfb4n.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 3, 3), (54, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (120, 288), (288, 1))
assert_size_stride(primals_9, (120, ), (1, ))
assert_size_stride(primals_10, (84, 120), (120, 1))
assert_size_stride(primals_11, (84, ), (1, ))
assert_size_stride(primals_12, (10, 84), (84, 1))
assert_size_stride(primals_13, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 62, 62), (23064, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 6, 62, 62), (23232, 3872, 62, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 92256, grid=grid(92256), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 31, 31), (5792, 961, 31, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 31, 31), (5888, 961, 31, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 23064, grid=grid(23064), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 29, 29), (13456, 841, 29, 1))
buf5 = empty_strided_cuda((4, 16, 29, 29), (13472, 841, 29, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 53824, grid=grid(53824), stream=stream0)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 16, 14, 14), (3136, 196, 14, 1), torch.float32)
buf7 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 12544, grid=grid(12544), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 12, 12), (4608, 144, 12, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 18432, grid=grid(18432), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.int8)
buf11 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 4608, grid=grid(4608), stream=stream0)
buf12 = empty_strided_cuda((16, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf11, (16, 288), (288, 1), 0), reinterpret_tensor(primals_8, (288, 120), (1, 288), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_9, 1920, grid=grid(1920), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((16, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (120, 84), (1, 120), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_7.run(buf15, primals_11, 1344, grid=grid(1344), stream=stream0)
del primals_11
buf16 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(primals_12, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf16)
del primals_13
return (buf16, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16, 288), (288, 1), 0), buf13, buf15, primals_12, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 3, 3), (54, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((120, 288), (288, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.nn.functional as F
class Baseline(nn.Module):
def __init__(self):
super(Baseline, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 3)
self.conv3 = nn.Conv2d(16, 32, 3)
self.fc1 = nn.Linear(32 * 3 * 3, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(-1, 32 * 3 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
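# Worked shape trace for Baseline.forward (illustration, assuming the
# 4x3x64x64 input from get_inputs above): conv1 (3x3, no padding) gives
# 62x62, pool -> 31x31; conv2 -> 29x29, pool -> 14x14; conv3 -> 12x12,
# pool -> 6x6. The feature map is (4, 32, 6, 6) = 4608 elements, so
# x.view(-1, 32 * 3 * 3) yields (16, 288) rather than (4, 288) -- which is
# why the compiled call() performs a (16, 288) x (288, 120) mm and returns
# (16, 10) logits for a batch of 4 images.
def _baseline_shape_trace():
    out = Baseline()(torch.rand(4, 3, 64, 64))
    assert out.shape == (16, 10)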
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 92256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 6
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 23064
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = xindex // 31 % 31
x4 = xindex // 961
x3 = xindex // 5766
x5 = xindex % 5766
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 5792 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 5888 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 53824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 16
x2 = xindex // 13456
x4 = xindex % 13456
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 13472 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196 % 16
x3 = xindex // 3136
x4 = xindex
x5 = xindex % 3136
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2 + 13472 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2 + 13472 * x3),
xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2 + 13472 * x3
), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2 + 13472 * x3
), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x4, tmp6, xmask)
tl.store(out_ptr1 + (x5 + 3200 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    # No mask needed here: xnumel (18432) is an exact multiple of XBLOCK, so
    # every lane is in-bounds; this bare tl.full is the residue of the
    # elided xmask assignment.
    tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 144 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 24 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 24 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + 2 * x0 + 24 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + 2 * x0 + 24 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 3, 3), (54, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (120, 288), (288, 1))
assert_size_stride(primals_9, (120,), (1,))
assert_size_stride(primals_10, (84, 120), (120, 1))
assert_size_stride(primals_11, (84,), (1,))
assert_size_stride(primals_12, (10, 84), (84, 1))
assert_size_stride(primals_13, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 62, 62), (23064, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 6, 62, 62), (23232, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(92256)](buf0, primals_2,
buf1, 92256, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 31, 31), (5792, 961, 31, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 31, 31), (5888, 961, 31, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(23064)](buf1, buf2,
buf3, 23064, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 29, 29), (13456, 841, 29, 1))
buf5 = empty_strided_cuda((4, 16, 29, 29), (13472, 841, 29, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(53824)](buf4, primals_5,
buf5, 53824, XBLOCK=512, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 16, 14, 14), (3136, 196, 14, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(12544)](buf5, buf6,
buf7, 12544, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 12, 12), (4608, 144, 12, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(18432)](buf9, primals_7,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.int8)
buf11 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_5[grid(4608)](buf9, buf10,
buf11, 4608, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((16, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 288), (288, 1), 0),
reinterpret_tensor(primals_8, (288, 120), (1, 288), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(1920)](buf13, primals_9, 1920, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((16, 84), (84, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (120, 84),
(1, 120), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_relu_7[grid(1344)](buf15, primals_11, 1344, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_11
buf16 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(
primals_12, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf16)
del primals_13
return (buf16, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16,
288), (288, 1), 0), buf13, buf15, primals_12, primals_10, primals_8)
class BaselineNew(nn.Module):
def __init__(self):
super(BaselineNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 3)
self.conv3 = nn.Conv2d(16, 32, 3)
self.fc1 = nn.Linear(32 * 3 * 3, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_12 = self.fc3.weight
primals_13 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
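# Minimal usage sketch for the compiled module (assumption: a CUDA device is
# available, since call() allocates with empty_strided_cuda and launches
# Triton kernels; the (16, 10) output follows the same view(-1, 288)
# flattening traced for Baseline above):
def _baseline_new_usage():
    model = BaselineNew().cuda()
    logits = model(torch.rand(4, 3, 64, 64, device='cuda'))
    assert logits.shape == (16, 10)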
|
Karin-S/USYD-ELEC5307
|
Baseline
| false | 9,225 |
[
"Apache-2.0"
] | 0 |
83cb40adf0c15ee703a880fc7aba5c69b82a5434
|
https://github.com/Karin-S/USYD-ELEC5307/tree/83cb40adf0c15ee703a880fc7aba5c69b82a5434
|
ColorJitterLayer
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/k5/ck5gtelstphbmeyxj47cxu7yycqfyjvkdcsi2irtvkcdext2kdfh.py
# Topologically Sorted Source Nodes: [hsv], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# hsv => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %sub_3, %getitem], 1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 12
x0 = xindex % 4
x2 = (xindex // 48)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (32 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = 1.7320508075688772
tmp9 = tmp7 * tmp8
tmp10 = tl.load(in_ptr0 + (x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp11 = 2.0
tmp12 = tmp10 * tmp11
tmp13 = tmp12 - tmp5
tmp14 = tmp13 - tmp6
tmp15 = libdevice.atan2(tmp9, tmp14)
tmp16 = 6.283185307179586
    # tmp17..tmp25 implement Python-style modulo: fold the raw remainder
    # back into [0, 2*pi) whenever its sign differs from the divisor's,
    # matching torch.remainder semantics.
    tmp17 = tmp15 % tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = tmp17 != tmp18
tmp20 = libdevice.signbit(tmp17) if (tmp17).dtype is tl.float32 else tmp17 < 0
tmp21 = libdevice.signbit(tmp16) if (tmp16).dtype is tl.float32 else tmp16 < 0
tmp22 = tmp20 != tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 + tmp16
tmp25 = tl.where(tmp23, tmp24, tmp17)
tmp26 = 0.15915494309189535
tmp27 = tmp25 * tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp4, tmp27, tmp28)
tmp30 = tmp0 >= tmp3
tmp31 = tl.full([1], 8, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr0 + (16 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0)
tmp36 = triton_helpers.minimum(tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + (32 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0)
tmp38 = triton_helpers.minimum(tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (48 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0)
tmp40 = triton_helpers.minimum(tmp38, tmp39)
tmp41 = triton_helpers.maximum(tmp34, tmp35)
tmp42 = triton_helpers.maximum(tmp41, tmp37)
tmp43 = triton_helpers.maximum(tmp42, tmp39)
tmp44 = 1e-08
tmp45 = tmp43 + tmp44
tmp46 = tmp40 / tmp45
tmp47 = 1.0
tmp48 = tmp47 - tmp46
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp33, tmp48, tmp49)
tmp51 = tmp0 >= tmp31
tmp52 = tl.full([1], 12, tl.int64)
tmp53 = tmp0 < tmp52
tmp54 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0)
tmp55 = tl.load(in_ptr0 + (16 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0)
tmp56 = triton_helpers.maximum(tmp54, tmp55)
tmp57 = tl.load(in_ptr0 + (32 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0)
tmp58 = triton_helpers.maximum(tmp56, tmp57)
tmp59 = tl.load(in_ptr0 + (48 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0)
tmp60 = triton_helpers.maximum(tmp58, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp51, tmp60, tmp61)
tmp63 = tl.where(tmp33, tmp50, tmp62)
tmp64 = tl.where(tmp4, tmp29, tmp63)
tl.store(out_ptr0 + (x3), tmp64, xmask)
''', device_str='cuda')
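# Hedged eager-mode sketch of the stack kernel above (illustration only; the
# compiled trace was specialized to a 4-channel rand([4, 4, 4, 4]) test
# tensor, while the math below assumes a 3-channel RGB input). Hue comes
# from atan2(sqrt(3) * (g - b), 2r - g - b) folded into [0, 2*pi) and scaled
# by 1/(2*pi); saturation is 1 - min/(max + 1e-8); value is the channel max:
def _rgb_to_hsv_reference(img):
    import math
    r, g, b = img.unbind(dim=1)
    hue = torch.atan2(math.sqrt(3.0) * (g - b), 2 * r - g - b)
    hue = (hue % (2 * math.pi)) / (2 * math.pi)
    mx = img.max(dim=1).values
    mn = img.min(dim=1).values
    sat = 1.0 - mn / (mx + 1e-08)
    return torch.stack([hue, sat, mx], dim=1)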
# kernel path: runs/run_shard_8/inductor_cache/gg/cggxgz5kv3f6v57dm57mpxkp77xbxjw2mh4mscq7a4rphyiqpca4.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default_3, index_put
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=4] = call_function[target=torch.ops.aten.index_put_.default](args = (%view, [%bitwise_not], %full_default_3), kwargs = {})
triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 == tmp0
tmp2 = tl_math.abs(tmp0)
tmp3 = float("inf")
tmp4 = tmp2 != tmp3
tmp5 = tmp1 & tmp4
tmp6 = tmp5 == 0
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp0)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
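# Hedged illustration of the kernel above: the graph's
# hsv[~torch.isfinite(hsv)] = 0 index_put is lowered to a pure elementwise
# select -- a value survives only if it equals itself (not NaN) and its
# magnitude is not infinite:
def _zero_nonfinite_reference(x):
    return torch.where(torch.isfinite(x), x, torch.zeros_like(x))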
# kernel path: runs/run_shard_8/inductor_cache/tw/ctw7vqtn4wrdd6k3eojcn6wy2cqgcazcuexsxhfewjvf3xjv6ole.py
# Topologically Sorted Source Nodes: [f_h], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
# f_h => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 1], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_2 = async_compile.triton('triton_poi_fused_new_zeros_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/v4/cv46aopjpsjvzqshfrlrag6y4ifo6nlt2huynabmbhvbs437klh2.py
# Topologically Sorted Source Nodes: [f_s], Original ATen: [aten.new_ones]
# Source node to ATen node mapping:
# f_s => full_default_1
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_ones_3 = async_compile.triton('triton_poi_fused_new_ones_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_ones_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_ones_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mq/cmqhtrxejw46pbgdo6fh4f7w3qkoe46hoelc5we53jnftjhog4ar.py
# Topologically Sorted Source Nodes: [mul_2, truediv_2, h_1, h_2, setitem_1, mul_3, setitem_2], Original ATen: [aten.mul, aten.div, aten.add, aten.remainder, aten.copy]
# Source node to ATen node mapping:
# h_1 => add_1
# h_2 => remainder_1
# mul_2 => mul_3
# mul_3 => mul_4
# setitem_1 => copy
# setitem_2 => copy_1
# truediv_2 => div_2
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%uniform, 255.0), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, 360.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_4, %div_2), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%select_int, %add_1, 2, 0, 9223372036854775807), kwargs = {})
# %select_scatter_default : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%index_put, %slice_scatter_default, 1, 0), kwargs = {})
# %remainder_1 : [num_users=1] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%select_6, 1), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_8, %remainder_1), kwargs = {})
# %select_scatter_default_1 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %copy, 1, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, %uniform_1), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_14, %mul_4), kwargs = {})
# %select_scatter_default_2 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %copy_1, 1, 1), kwargs = {})
triton_poi_fused_add_copy_div_mul_remainder_4 = async_compile.triton('triton_poi_fused_add_copy_div_mul_remainder_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_copy_div_mul_remainder_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_copy_div_mul_remainder_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 3
x0 = xindex % 16
x2 = (xindex // 48)
x3 = xindex
tmp6 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (16 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (x3), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp1 == tmp3
tmp5 = tmp3 == tmp3
tmp8 = 255.0
tmp9 = tmp7 * tmp8
tmp10 = 0.002777777777777778
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp13 = tl.where(tmp5, tmp12, tmp6)
tmp14 = 1.0
tmp15 = tmp13 % tmp14
tmp16 = tmp15 != tmp3
tmp17 = libdevice.signbit(tmp15) if (tmp15).dtype is tl.float32 else tmp15 < 0
tmp18 = libdevice.signbit(tmp14) if (tmp14).dtype is tl.float32 else tmp14 < 0
tmp19 = tmp17 != tmp18
tmp20 = tmp16 & tmp19
tmp21 = tmp15 + tmp14
tmp22 = tl.where(tmp20, tmp21, tmp15)
tmp24 = tl.where(tmp4, tmp12, tmp23)
tmp25 = tl.where(tmp4, tmp22, tmp24)
tmp27 = tmp25 * tmp26
tmp28 = tmp0 == tmp3
tmp30 = tl.where(tmp28, tmp12, tmp29)
tmp31 = tl.where(tmp28, tmp22, tmp30)
tmp32 = tl.where(tmp2, tmp27, tmp31)
tl.store(out_ptr0 + (x3), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/zq/czq45ck4e3bfpgidquurvbk77tkggknyyrok7iefanwqczot3sts.py
# Topologically Sorted Source Nodes: [mul_4, setitem_3, x, v, s, c, h_3, mul_6, add_1, k, sub_4, t, t_1, mul_7, x_1, means, sub_6, mul_8, x_2, inputs], Original ATen: [aten.mul, aten.copy, aten.clamp, aten.index, aten.add, aten.remainder, aten.rsub, aten.minimum, aten.sub, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_2
# c => mul_6
# h_3 => index
# inputs => clamp_max_2, clamp_min_2
# k => remainder_2
# means => mean
# mul_4 => mul_5
# mul_6 => mul_7
# mul_7 => mul_8
# mul_8 => mul_9
# s => index_1
# setitem_3 => copy_2
# sub_4 => sub_4
# sub_6 => sub_6
# t => minimum
# t_1 => clamp_max_1, clamp_min_1
# v => index_2
# x => clamp_max, clamp_min
# x_1 => sub_5
# x_2 => add_3
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, %uniform_2), kwargs = {})
# %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_20, %mul_5), kwargs = {})
# %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %copy_2, 1, 2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%select_scatter_default_3, 0), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
# %index_2 : [num_users=2] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_6]), kwargs = {})
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_5]), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %index_1), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_4]), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, 6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_7), kwargs = {})
# %remainder_2 : [num_users=2] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%add_2, 6), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4.0, %remainder_2), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%remainder_2, %sub_4), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%minimum, 0), kwargs = {})
# %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %clamp_max_1), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%index_2, %mul_8), kwargs = {})
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_5, [2, 3], True), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %mean), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %uniform_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mean), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {})
# %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1), kwargs = {})
triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5 = async_compile.triton('triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 12
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 3
r2 = rindex
x1 = (xindex // 3)
x3 = xindex
tmp13 = tl.load(in_ptr0 + (32 + r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr0 + (16 + r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1, 1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1, 1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 3.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = 5.0
tmp9 = tl.where(tmp2, tmp8, tmp7)
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = tl.full([1, 1], 2, tl.int32)
tmp12 = tmp10 == tmp11
tmp15 = tmp13 * tmp14
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = triton_helpers.minimum(tmp19, tmp6)
tmp21 = 6.0
tmp22 = tmp20 * tmp21
tmp23 = tmp9 + tmp22
tmp24 = tmp23 % tmp21
tmp25 = tmp24 != tmp10
tmp26 = libdevice.signbit(tmp24) if (tmp24).dtype is tl.float32 else tmp24 < 0
tmp27 = libdevice.signbit(tmp21) if (tmp21).dtype is tl.float32 else tmp21 < 0
tmp28 = tmp26 != tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp24 + tmp21
tmp31 = tl.where(tmp29, tmp30, tmp24)
tmp32 = 4.0
tmp33 = tmp32 - tmp31
tmp34 = triton_helpers.minimum(tmp31, tmp33)
tmp35 = triton_helpers.maximum(tmp34, tmp18)
tmp36 = tmp11 == tmp11
tmp37 = tl.where(tmp36, tmp15, tmp13)
tmp38 = triton_helpers.maximum(tmp37, tmp18)
tmp39 = triton_helpers.minimum(tmp38, tmp6)
tmp40 = tl.full([1, 1], 1, tl.int32)
tmp41 = tmp40 == tmp11
tmp43 = tl.where(tmp41, tmp15, tmp42)
tmp44 = triton_helpers.maximum(tmp43, tmp18)
tmp45 = triton_helpers.minimum(tmp44, tmp6)
tmp46 = tmp39 * tmp45
tmp47 = triton_helpers.minimum(tmp35, tmp6)
tmp48 = tmp46 * tmp47
tmp49 = tmp39 - tmp48
tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
tmp52 = tl.where(xmask, tmp50, 0)
tmp53 = tl.sum(tmp52, 1)[:, None]
tmp54 = 16.0
tmp55 = tmp53 / tmp54
tmp56 = tmp49 - tmp55
tmp58 = tmp56 * tmp57
tmp59 = tmp58 + tmp55
tmp60 = triton_helpers.maximum(tmp59, tmp18)
tmp61 = triton_helpers.minimum(tmp60, tmp6)
tl.store(out_ptr1 + (r2 + (16*x3)), tmp61, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12, 4), (48, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hsv], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_1.run(buf0, buf0, 192, grid=grid(192), stream=stream0)
buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_h], Original ATen: [aten.new_zeros]
triton_poi_fused_new_zeros_2.run(buf7, 4, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [f_h, uniform_], Original ATen: [aten.new_zeros, aten.uniform]
buf8 = torch.ops.aten.uniform.default(buf7, -4.0, 4.0)
buf9 = buf8
del buf8
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [f_s], Original ATen: [aten.new_ones]
triton_poi_fused_new_ones_3.run(buf10, 4, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [f_s, f_s_1], Original ATen: [aten.new_ones, aten.uniform]
buf11 = torch.ops.aten.uniform.default(buf10, 0.0, 5.0)
del buf10
buf12 = buf11
del buf11
buf13 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, truediv_2, h_1, h_2, setitem_1, mul_3, setitem_2], Original ATen: [aten.mul, aten.div, aten.add, aten.remainder, aten.copy]
triton_poi_fused_add_copy_div_mul_remainder_4.run(buf0, buf9, buf12, buf13, 192, grid=grid(192), stream=stream0)
del buf12
buf14 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [f_v], Original ATen: [aten.new_ones]
triton_poi_fused_new_ones_3.run(buf14, 4, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [f_v, f_v_1], Original ATen: [aten.new_ones, aten.uniform]
buf15 = torch.ops.aten.uniform.default(buf14, 0.0, 5.0)
buf16 = buf15
del buf15
buf20 = reinterpret_tensor(buf14, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [factor], Original ATen: [aten.uniform]
buf21 = torch.ops.aten.uniform.default(buf20, 0.0, 5.0)
del buf20
buf22 = buf21
del buf21
buf23 = reinterpret_tensor(buf0, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [mul_4, setitem_3, x, v, s, c, h_3, mul_6, add_1, k, sub_4, t, t_1, mul_7, x_1, means, sub_6, mul_8, x_2, inputs], Original ATen: [aten.mul, aten.copy, aten.clamp, aten.index, aten.add, aten.remainder, aten.rsub, aten.minimum, aten.sub, aten.mean]
triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5.run(buf13, buf16, buf22, buf23, 12, 16, grid=grid(12), stream=stream0)
del buf13
del buf16
del buf22
return (buf23, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.autograd import Function
import math
import numbers
import torch
import numpy as np
import torch.nn as nn
def hsv2rgb(hsv):
"""Convert a 4-d HSV tensor to the RGB counterpart.
>>> %timeit hsv2rgb_lookup(hsv)
2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
    >>> %timeit hsv2rgb(hsv)
298 µs ± 542 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6)
True
References
[1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative
"""
h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
c = v * s
n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1)
k = (n + h * 6) % 6
t = torch.min(k, 4.0 - k)
t = torch.clamp(t, 0, 1)
return v - c * t
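# A minimal sanity check of the formula above (my addition, not from the
# original repo): for pure red, hsv = (h=0, s=1, v=1), we get k = [5, 3, 1],
# t = clamp(min(k, 4 - k), 0, 1) = [0, 1, 1], so v - c * t = [1, 0, 0] = red.
# hsv = torch.tensor([0.0, 1.0, 1.0]).view(1, 3, 1, 1)
# hsv2rgb(hsv)  # -> (1, 3, 1, 1) tensor holding RGB (1, 0, 0)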
def rgb2hsv(rgb):
"""Convert a 4-d RGB tensor to the HSV counterpart.
Here, we compute hue using atan2() based on the definition in [1],
instead of using the common lookup table approach as in [2, 3].
    Those values agree when the angle is a multiple of 30°;
    otherwise they may differ by at most ~1.2°.
>>> %timeit rgb2hsv_lookup(rgb)
1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> %timeit rgb2hsv(rgb)
380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max()
tensor(0.0031, device='cuda:0')
References
[1] https://en.wikipedia.org/wiki/Hue
[2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html
[3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212
"""
r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
Cmax = rgb.max(1)[0]
Cmin = rgb.min(1)[0]
hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b)
hue = hue % (2 * math.pi) / (2 * math.pi)
saturate = 1 - Cmin / (Cmax + 1e-08)
value = Cmax
hsv = torch.stack([hue, saturate, value], dim=1)
hsv[~torch.isfinite(hsv)] = 0.0
return hsv
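# Worked example for the atan2-based hue above (my addition; the numbers
# follow directly from the code): for pure red, r=1 and g=b=0, so
# hue = atan2(0, 2) = 0, saturate = 1 - 0 / (1 + 1e-08) ~ 1, value = 1.
# rgb = torch.zeros(1, 3, 2, 2); rgb[:, 0] = 1.0
# rgb2hsv(rgb)  # hue channel ~0, saturation ~1, value 1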
class RandomHSVFunction(Function):
@staticmethod
def forward(ctx, x, f_h, f_s, f_v):
x = rgb2hsv(x)
h = x[:, 0, :, :]
h += f_h * 255.0 / 360.0
h = h % 1
x[:, 0, :, :] = h
x[:, 1, :, :] = x[:, 1, :, :] * f_s
x[:, 2, :, :] = x[:, 2, :, :] * f_v
x = torch.clamp(x, 0, 1)
x = hsv2rgb(x)
return x
@staticmethod
def backward(ctx, grad_output):
grad_input = None
if ctx.needs_input_grad[0]:
grad_input = grad_output.clone()
return grad_input, None, None, None
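# Note (my addition): backward above is a straight-through estimator -- the
# HSV round trip is treated as identity for gradients, and no gradient flows
# to f_h, f_s, f_v. A hedged sketch of a call:
# x = torch.rand(2, 3, 4, 4, requires_grad=True)
# y = RandomHSVFunction.apply(x, x.new_zeros(2, 1, 1), x.new_ones(2, 1, 1),
#                             x.new_ones(2, 1, 1))
# y.sum().backward()  # x.grad is all ones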
class ColorJitterLayer(nn.Module):
def __init__(self, brightness, contrast, saturation, hue):
super(ColorJitterLayer, self).__init__()
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)
def _check_input(self, value, name, center=1, bound=(0, float('inf')),
clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError(
                    'If {} is a single number, it must be non-negative.'.
format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError('{} values should be between {}'.format(
name, bound))
else:
raise TypeError(
                '{} should be a single number or a list/tuple with length 2.'
.format(name))
if value[0] == value[1] == center:
value = None
return value
def adjust_contrast(self, x):
if self.contrast:
factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast)
means = torch.mean(x, dim=[2, 3], keepdim=True)
x = (x - means) * factor + means
return torch.clamp(x, 0, 1)
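    # Worked numbers for the contrast rule above (my addition): with a
    # per-channel spatial mean of 0.5, a pixel at 0.75 and factor 2 maps to
    # (0.75 - 0.5) * 2 + 0.5 = 1.0; factor 0 collapses every pixel to its
    # channel mean before the final clamp to [0, 1].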
def adjust_hsv(self, x):
f_h = x.new_zeros(x.size(0), 1, 1)
f_s = x.new_ones(x.size(0), 1, 1)
f_v = x.new_ones(x.size(0), 1, 1)
if self.hue:
f_h.uniform_(*self.hue)
if self.saturation:
f_s = f_s.uniform_(*self.saturation)
if self.brightness:
f_v = f_v.uniform_(*self.brightness)
return RandomHSVFunction.apply(x, f_h, f_s, f_v)
def transform(self, inputs):
if np.random.rand() > 0.5:
transforms = [self.adjust_contrast, self.adjust_hsv]
else:
transforms = [self.adjust_hsv, self.adjust_contrast]
for t in transforms:
inputs = t(inputs)
return inputs
def forward(self, inputs):
return self.transform(inputs)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'brightness': 4, 'contrast': 4, 'saturation': 4, 'hue': 4}]
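# Hedged usage sketch tying the helpers above together (my addition; the
# argument values mirror get_init_inputs()):
if __name__ == '__main__':
    layer = ColorJitterLayer(brightness=4, contrast=4, saturation=4, hue=4)
    out = layer(torch.rand([4, 4, 4, 4]))
    print(out.shape)  # torch.Size([4, 3, 4, 4]) -- rgb2hsv keeps 3 channels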
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.autograd import Function
import math
import numbers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 12
x0 = xindex % 4
x2 = xindex // 48
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = 1.7320508075688772
tmp9 = tmp7 * tmp8
tmp10 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp11 = 2.0
tmp12 = tmp10 * tmp11
tmp13 = tmp12 - tmp5
tmp14 = tmp13 - tmp6
tmp15 = libdevice.atan2(tmp9, tmp14)
tmp16 = 6.283185307179586
tmp17 = tmp15 % tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = tmp17 != tmp18
    tmp20 = libdevice.signbit(tmp17) if tmp17.dtype is tl.float32 else tmp17 < 0
    tmp21 = libdevice.signbit(tmp16) if tmp16.dtype is tl.float32 else tmp16 < 0
tmp22 = tmp20 != tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 + tmp16
tmp25 = tl.where(tmp23, tmp24, tmp17)
tmp26 = 0.15915494309189535
tmp27 = tmp25 * tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp4, tmp27, tmp28)
tmp30 = tmp0 >= tmp3
tmp31 = tl.full([1], 8, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask,
other=0.0)
tmp35 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 &
xmask, other=0.0)
tmp36 = triton_helpers.minimum(tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 &
xmask, other=0.0)
tmp38 = triton_helpers.minimum(tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 &
xmask, other=0.0)
tmp40 = triton_helpers.minimum(tmp38, tmp39)
tmp41 = triton_helpers.maximum(tmp34, tmp35)
tmp42 = triton_helpers.maximum(tmp41, tmp37)
tmp43 = triton_helpers.maximum(tmp42, tmp39)
tmp44 = 1e-08
tmp45 = tmp43 + tmp44
tmp46 = tmp40 / tmp45
tmp47 = 1.0
tmp48 = tmp47 - tmp46
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp33, tmp48, tmp49)
tmp51 = tmp0 >= tmp31
tl.full([1], 12, tl.int64)
tmp54 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask,
other=0.0)
tmp55 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 &
xmask, other=0.0)
tmp56 = triton_helpers.maximum(tmp54, tmp55)
tmp57 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 &
xmask, other=0.0)
tmp58 = triton_helpers.maximum(tmp56, tmp57)
tmp59 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 &
xmask, other=0.0)
tmp60 = triton_helpers.maximum(tmp58, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp51, tmp60, tmp61)
tmp63 = tl.where(tmp33, tmp50, tmp62)
tmp64 = tl.where(tmp4, tmp29, tmp63)
tl.store(out_ptr0 + x3, tmp64, xmask)
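# Note (my addition, hedged): the kernel above appears to fuse rgb2hsv()'s
# torch.stack([hue, saturate, value], dim=1) into one (4, 12, 4) buffer --
# rows 0-3 hold the atan2 hue mapped into [0, 1) (1.7320508... = sqrt(3),
# 0.15915... = 1 / (2*pi)), rows 4-7 the saturation 1 - Cmin / (Cmax + 1e-08),
# and rows 8-11 the channel-wise max (value).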
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 == tmp0
tmp2 = tl_math.abs(tmp0)
tmp3 = float('inf')
tmp4 = tmp2 != tmp3
tmp5 = tmp1 & tmp4
tmp6 = tmp5 == 0
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp0)
tl.store(out_ptr0 + x0, tmp8, xmask)
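# Note (my addition): tmp0 == tmp0 is False only for NaN and abs(x) != inf
# screens infinities, so this pass is the compiled counterpart of
# hsv[~torch.isfinite(hsv)] = 0.0 in rgb2hsv() above.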
@triton.jit
def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_new_ones_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_copy_div_mul_remainder_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x0 = xindex % 16
x2 = xindex // 48
x3 = xindex
tmp6 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + x3, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp1 == tmp3
tmp5 = tmp3 == tmp3
tmp8 = 255.0
tmp9 = tmp7 * tmp8
tmp10 = 0.002777777777777778
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp13 = tl.where(tmp5, tmp12, tmp6)
tmp14 = 1.0
tmp15 = tmp13 % tmp14
tmp16 = tmp15 != tmp3
    tmp17 = libdevice.signbit(tmp15) if tmp15.dtype is tl.float32 else tmp15 < 0
    tmp18 = libdevice.signbit(tmp14) if tmp14.dtype is tl.float32 else tmp14 < 0
tmp19 = tmp17 != tmp18
tmp20 = tmp16 & tmp19
tmp21 = tmp15 + tmp14
tmp22 = tl.where(tmp20, tmp21, tmp15)
tmp24 = tl.where(tmp4, tmp12, tmp23)
tmp25 = tl.where(tmp4, tmp22, tmp24)
tmp27 = tmp25 * tmp26
tmp28 = tmp0 == tmp3
tmp30 = tl.where(tmp28, tmp12, tmp29)
tmp31 = tl.where(tmp28, tmp22, tmp30)
tmp32 = tl.where(tmp2, tmp27, tmp31)
tl.store(out_ptr0 + x3, tmp32, xmask)
@triton.jit
def triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5(
in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 3
r2 = rindex
x1 = xindex // 3
x3 = xindex
tmp13 = tl.load(in_ptr0 + (32 + r2 + 48 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (r2 + 48 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp42 = tl.load(in_ptr0 + (16 + r2 + 48 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1, 1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1, 1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 3.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = 5.0
tmp9 = tl.where(tmp2, tmp8, tmp7)
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = tl.full([1, 1], 2, tl.int32)
tmp12 = tmp10 == tmp11
tmp15 = tmp13 * tmp14
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = triton_helpers.minimum(tmp19, tmp6)
tmp21 = 6.0
tmp22 = tmp20 * tmp21
tmp23 = tmp9 + tmp22
tmp24 = tmp23 % tmp21
tmp25 = tmp24 != tmp10
    tmp26 = libdevice.signbit(tmp24) if tmp24.dtype is tl.float32 else tmp24 < 0
    tmp27 = libdevice.signbit(tmp21) if tmp21.dtype is tl.float32 else tmp21 < 0
tmp28 = tmp26 != tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp24 + tmp21
tmp31 = tl.where(tmp29, tmp30, tmp24)
tmp32 = 4.0
tmp33 = tmp32 - tmp31
tmp34 = triton_helpers.minimum(tmp31, tmp33)
tmp35 = triton_helpers.maximum(tmp34, tmp18)
tmp36 = tmp11 == tmp11
tmp37 = tl.where(tmp36, tmp15, tmp13)
tmp38 = triton_helpers.maximum(tmp37, tmp18)
tmp39 = triton_helpers.minimum(tmp38, tmp6)
tmp40 = tl.full([1, 1], 1, tl.int32)
tmp41 = tmp40 == tmp11
tmp43 = tl.where(tmp41, tmp15, tmp42)
tmp44 = triton_helpers.maximum(tmp43, tmp18)
tmp45 = triton_helpers.minimum(tmp44, tmp6)
tmp46 = tmp39 * tmp45
tmp47 = triton_helpers.minimum(tmp35, tmp6)
tmp48 = tmp46 * tmp47
tmp49 = tmp39 - tmp48
tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
tmp52 = tl.where(xmask, tmp50, 0)
tmp53 = tl.sum(tmp52, 1)[:, None]
tmp54 = 16.0
tmp55 = tmp53 / tmp54
tmp56 = tmp49 - tmp55
tmp58 = tmp56 * tmp57
tmp59 = tmp58 + tmp55
tmp60 = triton_helpers.maximum(tmp59, tmp18)
tmp61 = triton_helpers.minimum(tmp60, tmp6)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp61, xmask)
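# Note (my addition, hedged): this persistent reduction seems to fuse the
# HSV -> RGB formula (k = (n + h * 6) % 6 with n in {5, 3, 1} selected per
# channel, t = clamp(min(k, 4 - k), 0, 1), out = v - v * s * t) with
# adjust_contrast(): tl.sum(...) / 16.0 is the per-(image, channel) mean over
# the 4x4 plane, and tmp58/tmp59 apply (x - mean) * factor + mean before the
# clamp to [0, 1].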
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12, 4), (48, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
triton_poi_fused_index_put_lift_fresh_1[grid(192)](buf0, buf0, 192,
XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_new_zeros_2[grid(4)](buf7, 4, XBLOCK=4, num_warps=
1, num_stages=1)
buf8 = torch.ops.aten.uniform.default(buf7, -4.0, 4.0)
buf9 = buf8
del buf8
buf10 = buf7
del buf7
triton_poi_fused_new_ones_3[grid(4)](buf10, 4, XBLOCK=4, num_warps=
1, num_stages=1)
buf11 = torch.ops.aten.uniform.default(buf10, 0.0, 5.0)
del buf10
buf12 = buf11
del buf11
buf13 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
triton_poi_fused_add_copy_div_mul_remainder_4[grid(192)](buf0, buf9,
buf12, buf13, 192, XBLOCK=256, num_warps=4, num_stages=1)
del buf12
buf14 = buf9
del buf9
triton_poi_fused_new_ones_3[grid(4)](buf14, 4, XBLOCK=4, num_warps=
1, num_stages=1)
buf15 = torch.ops.aten.uniform.default(buf14, 0.0, 5.0)
buf16 = buf15
del buf15
buf20 = reinterpret_tensor(buf14, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf14
buf21 = torch.ops.aten.uniform.default(buf20, 0.0, 5.0)
del buf20
buf22 = buf21
del buf21
buf23 = reinterpret_tensor(buf0, (4, 3, 4, 4), (48, 16, 4, 1), 0)
del buf0
triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5[
grid(12)](buf13, buf16, buf22, buf23, 12, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf13
del buf16
del buf22
return buf23,
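# Hedged driver sketch for call() above (my addition; needs a CUDA device,
# with the input shape fixed by the assert_size_stride guard):
# out, = call([torch.rand([4, 4, 4, 4], device='cuda')])
# out.shape  # torch.Size([4, 3, 4, 4])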
def hsv2rgb(hsv):
"""Convert a 4-d HSV tensor to the RGB counterpart.
>>> %timeit hsv2rgb_lookup(hsv)
2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
    >>> %timeit hsv2rgb(hsv)
298 µs ± 542 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6)
True
References
[1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative
"""
h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
c = v * s
n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1)
k = (n + h * 6) % 6
t = torch.min(k, 4.0 - k)
t = torch.clamp(t, 0, 1)
return v - c * t
def rgb2hsv(rgb):
"""Convert a 4-d RGB tensor to the HSV counterpart.
Here, we compute hue using atan2() based on the definition in [1],
instead of using the common lookup table approach as in [2, 3].
    Those values agree when the angle is a multiple of 30°;
    otherwise they may differ by at most ~1.2°.
>>> %timeit rgb2hsv_lookup(rgb)
1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> %timeit rgb2hsv(rgb)
380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max()
tensor(0.0031, device='cuda:0')
References
[1] https://en.wikipedia.org/wiki/Hue
[2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html
[3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212
"""
r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
Cmax = rgb.max(1)[0]
Cmin = rgb.min(1)[0]
hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b)
hue = hue % (2 * math.pi) / (2 * math.pi)
saturate = 1 - Cmin / (Cmax + 1e-08)
value = Cmax
hsv = torch.stack([hue, saturate, value], dim=1)
hsv[~torch.isfinite(hsv)] = 0.0
return hsv
class RandomHSVFunction(Function):
@staticmethod
def forward(ctx, x, f_h, f_s, f_v):
x = rgb2hsv(x)
h = x[:, 0, :, :]
h += f_h * 255.0 / 360.0
h = h % 1
x[:, 0, :, :] = h
x[:, 1, :, :] = x[:, 1, :, :] * f_s
x[:, 2, :, :] = x[:, 2, :, :] * f_v
x = torch.clamp(x, 0, 1)
x = hsv2rgb(x)
return x
@staticmethod
def backward(ctx, grad_output):
grad_input = None
if ctx.needs_input_grad[0]:
grad_input = grad_output.clone()
return grad_input, None, None, None
class ColorJitterLayerNew(nn.Module):
def __init__(self, brightness, contrast, saturation, hue):
super(ColorJitterLayerNew, self).__init__()
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)
def _check_input(self, value, name, center=1, bound=(0, float('inf')),
clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError(
                    'If {} is a single number, it must be non-negative.'.
format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError('{} values should be between {}'.format(
name, bound))
else:
raise TypeError(
                '{} should be a single number or a list/tuple with length 2.'
.format(name))
if value[0] == value[1] == center:
value = None
return value
def adjust_contrast(self, x):
if self.contrast:
factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast)
means = torch.mean(x, dim=[2, 3], keepdim=True)
x = (x - means) * factor + means
return torch.clamp(x, 0, 1)
def adjust_hsv(self, x):
f_h = x.new_zeros(x.size(0), 1, 1)
f_s = x.new_ones(x.size(0), 1, 1)
f_v = x.new_ones(x.size(0), 1, 1)
if self.hue:
f_h.uniform_(*self.hue)
if self.saturation:
f_s = f_s.uniform_(*self.saturation)
if self.brightness:
f_v = f_v.uniform_(*self.brightness)
return RandomHSVFunction.apply(x, f_h, f_s, f_v)
def transform(self, inputs):
if np.random.rand() > 0.5:
transforms = [self.adjust_contrast, self.adjust_hsv]
else:
transforms = [self.adjust_hsv, self.adjust_contrast]
for t in transforms:
inputs = t(inputs)
return inputs
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
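# Note (my addition, hedged): unlike ColorJitterLayer.transform() above, the
# compiled forward skips the np.random.rand() coin flip and always runs the
# hsv-then-contrast order baked into call(); the per-sample random draws
# still happen via torch.ops.aten.uniform.default inside call().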
|
Jinoh-Cho/Visual-Genome-Image-Inpainting
|
ColorJitterLayer
| false | 9226 |
[
"MIT"
] | 0 |
f8c43bf2e4a9139d4c35903d0c323b9d8eb54859
|
https://github.com/Jinoh-Cho/Visual-Genome-Image-Inpainting/tree/f8c43bf2e4a9139d4c35903d0c323b9d8eb54859
|
ScaledDotProductAttention
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/4q/c4qoh645afcunrhaa5xye6sbkw2mzzlvmntdpffld4732bbjzx7o.py
# Topologically Sorted Source Nodes: [scaling_factor, attention], Original ATen: [aten.sqrt, aten._softmax]
# Source node to ATen node mapping:
# attention => exp
# scaling_factor => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %where_self), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_sqrt_0 = async_compile.triton('triton_poi_fused__softmax_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = 0.0
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6 * tmp1
tmp21 = tmp19 / tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
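# Note (my addition): the where(2.0 >= 0, 1.0, -1.0) sign trick plus the row
# max subtraction above is the numerically stable softmax -- the kernel
# computes exp((x - rowmax) / 2.0), with sqrt(d_k) = 2.0 (d_k = 4) folded in
# as a constant, matching the "scaling_factor" node in the graph fragment.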
# kernel path: runs/run_shard_8/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scaling_factor, attention], Original ATen: [aten.sqrt, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_sqrt_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class ScaledDotProductAttention(nn.Module):
"""
    Attention mechanisms usually scale values based on relationships between
keys and queries.
Attention(Q,K,V) = A(Q,K)*V where A() is a normalization function.
A common choice for the normalization function is scaled dot-product attention:
A(Q,K) = Softmax(Q*K^T / sqrt(d_attention))
Args:
dropout (float): Fraction between 0 and 1 corresponding to the degree of dropout used
"""
def __init__(self, dropout=0.0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, query, key, value, mask=None):
"""
Args:
query (torch.tensor):
key (torch.tensor):
value (torch.tensor):
mask (torch.tensor):
"""
d_k = key.shape[-1]
scaling_factor = torch.sqrt(torch.tensor(d_k))
scaled_dot_product = torch.matmul(query, key.permute(0, 2, 1)
) / scaling_factor
if mask is not None:
scaled_dot_product = scaled_dot_product.masked_fill(mask == 0,
-1000000000.0)
attention = self.softmax(scaled_dot_product)
attention = self.dropout(attention)
output = torch.matmul(attention, value)
return output, attention
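# Hedged usage sketch (my addition; shapes mirror get_inputs() below):
# attn = ScaledDotProductAttention()
# q = k = v = torch.rand(4, 4, 4)
# out, weights = attn(q, k, v)   # out: (4, 4, 4), weights: (4, 4, 4)
# weights.sum(dim=2)             # each row of A(Q, K) sums to 1
# With a mask, scores where mask == 0 become -1e9, so those positions get
# (numerically) zero attention weight after the softmax.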
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = 0.0
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6 * tmp1
tmp21 = tmp19 / tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
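# Note (my addition): together with triton_poi_fused__softmax_sqrt_0 above,
# this is the classic two-pass softmax -- pass one exponentiates the
# max-shifted scaled scores, pass two divides each element by its row sum.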
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
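# Hedged driver sketch for call() (my addition; needs CUDA, with shapes fixed
# by the assert_size_stride guards):
# out, attention = call([torch.rand(4, 4, 4, device='cuda') for _ in range(3)])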
class ScaledDotProductAttentionNew(nn.Module):
"""
    Attention mechanisms usually scale values based on relationships between
keys and queries.
Attention(Q,K,V) = A(Q,K)*V where A() is a normalization function.
A common choice for the normalization function is scaled dot-product attention:
A(Q,K) = Softmax(Q*K^T / sqrt(d_attention))
Args:
dropout (float): Fraction between 0 and 1 corresponding to the degree of dropout used
"""
def __init__(self, dropout=0.0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
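# Note (my addition, hedged): the compiled path never exercises self.dropout
# or the mask branch -- call() hard-codes mask=None and applies no dropout,
# which matches the default dropout=0.0 used when the module was traced.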
|
KalleBylin/tft_webapp
|
ScaledDotProductAttention
| false | 9227 |
[
"Apache-2.0"
] | 0 |
008f109e77f8bada417655dab482f340adb8cb6b
|
https://github.com/KalleBylin/tft_webapp/tree/008f109e77f8bada417655dab482f340adb8cb6b
|
LearnedPositionalEmbedding
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5i/c5iybmnijeaxq3pumkl5crtkns462pwdrh72bxy4lcvnlh3r4364.py
# Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# mask => convert_element_type
# ne => ne
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%convert_element_type, 1), kwargs = {})
triton_per_fused__to_copy_cumsum_ne_0 = async_compile.triton('triton_per_fused__to_copy_cumsum_ne_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_cumsum_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = 4.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int32)
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ft/cftxaavy7b7scxgnrhfsvfnicvimxnf3kpckow5nzkyed3meyoli.py
# Topologically Sorted Source Nodes: [ne, mask, type_as, incremental_indices, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add]
# Source node to ATen node mapping:
# incremental_indices => mul
# long => convert_element_type_2
# mask => convert_element_type
# ne => ne
# positions => add
# type_as => convert_element_type_1
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%cumsum, torch.int32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %convert_element_type), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul, torch.int64), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 4), kwargs = {})
triton_poi_fused__to_copy_add_mul_ne_1 = async_compile.triton('triton_poi_fused__to_copy_add_mul_ne_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_mul_ne_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0.to(tl.int32)
tmp3 = 4.0
tmp4 = tmp2 != tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = tmp1 * tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/in/cinglqnf6mtochspmiolvr3bqay6yiivzgqlihpkdlbd5p4ccw54.py
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
# Source node to ATen node mapping:
# embedding => embedding
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %add, 4), kwargs = {})
triton_poi_fused_embedding_2 = async_compile.triton('triton_poi_fused_embedding_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 9)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 9")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_cumsum_ne_0.run(primals_1, buf0, 64, 4, grid=grid(64), stream=stream0)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [ne, mask, type_as, incremental_indices, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add]
triton_poi_fused__to_copy_add_mul_ne_1.run(buf1, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
triton_poi_fused_embedding_2.run(buf1, primals_2, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_2
return (buf2, buf1, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((9, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
from torch import nn
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
:param torch.Tensor x:
:return torch.Tensor:
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
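# Illustrative check (added for this write-up, not part of the original repo):
# with an assumed pad id of 1, non-pad tokens are numbered from padding_idx+1
# upward and pads keep padding_idx, matching fairseq's make_positions.
def _demo_position_ids():
    ids = torch.tensor([[5, 7, 1, 1]])
    assert create_position_ids_from_input_ids(ids, 1).tolist() == [[2, 3, 1, 1]]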
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
    def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
                 padding_idx: 'int'):
        assert padding_idx is not None
        num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)
    def forward(self, input, use_cache=False):
        """Input is expected to be of size [bsz x seqlen]."""
        if use_cache:
            pos = int(self.padding_idx + input.size(1))
            positions = input.data.new(1, 1).fill_(pos)
        else:
            positions = create_position_ids_from_input_ids(input, self.padding_idx)
        return super().forward(positions), positions
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4, 'padding_idx': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # Persistent reduction: builds the (value != 4) mask and its cumulative sum
    # along dim 1 of the 4x4x4x4 input in a single scan per program.
    xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead codegen artifact (unused)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = 4.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int32)
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.int32)
tmp3 = 4.0
tmp4 = tmp2 != tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = tmp1 * tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 9) | ~xmask,
'index out of bounds: 0 <= tmp4 < 9')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_per_fused__to_copy_cumsum_ne_0[grid(64)](primals_1, buf0, 64,
4, XBLOCK=32, num_warps=2, num_stages=1)
buf1 = buf0
del buf0
triton_poi_fused__to_copy_add_mul_ne_1[grid(256)](buf1, primals_1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_embedding_2[grid(1024)](buf1, primals_2, buf2,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, buf1, buf1
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
:param torch.Tensor x:
:return torch.Tensor:
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
class LearnedPositionalEmbeddingNew(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
    def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
                 padding_idx: 'int'):
        assert padding_idx is not None
        num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
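# Hedged sanity check (added for illustration, not generated code): the fused
# wrapper should reproduce the eager position ids and embedding lookup. It
# assumes a CUDA device and the exact 4x4x4x4 float input the kernels were
# specialized for (xnumel is hard-coded above).
def _check_learned_positional_embedding():
    fused = LearnedPositionalEmbeddingNew(4, 4, padding_idx=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out, pos = fused(x)
    ref_pos = create_position_ids_from_input_ids(x, 4)
    assert torch.equal(pos, ref_pos)
    assert torch.allclose(out, nn.functional.embedding(ref_pos, fused.weight))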
|
JuruoMP/gap-exp
|
LearnedPositionalEmbedding
| false | 9,228 |
[
"Apache-2.0"
] | 0 |
2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05
|
https://github.com/JuruoMP/gap-exp/tree/2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05
|
Gaussian_Kernel_Function
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/lk/clkeshkzkkdncwbqxprw5cxci6ivmnmzov4x5wbr7zbgzfxclmwb.py
# Topologically Sorted Source Nodes: [aa, norm, pow_1, neg, div, exp, vaa, bb, norm_1, pow_2, neg_1, div_1, exp_1, vbb, add, ab, norm_2, pow_3, neg_2, div_2, exp_2, vab, mul, loss], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.pow, aten.neg, aten.div, aten.exp, aten.mean, aten.add, aten.mul]
# Source node to ATen node mapping:
# aa => sub
# ab => sub_2
# add => add
# bb => sub_1
# div => div
# div_1 => div_1
# div_2 => div_2
# exp => exp
# exp_1 => exp_1
# exp_2 => exp_2
# loss => sub_3
# mul => mul
# neg => neg
# neg_1 => neg_1
# neg_2 => neg_2
# norm => pow_1, pow_2, sum_1
# norm_1 => pow_4, pow_5, sum_2
# norm_2 => pow_7, pow_8, sum_3
# pow_1 => pow_3
# pow_2 => pow_6
# pow_3 => pow_9
# vaa => mean
# vab => mean_2
# vbb => mean_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_3,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 16), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %view_3), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [2]), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_5, 2), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_6,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg_1, 16), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_1,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%exp_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_3), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [2]), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_8, 2), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_9,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg_2, 16), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_2,), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%exp_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 2.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {})
triton_red_fused_add_div_exp_linalg_vector_norm_mean_mul_neg_pow_sub_0 = async_compile.triton('triton_red_fused_add_div_exp_linalg_vector_norm_mean_mul_neg_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_exp_linalg_vector_norm_mean_mul_neg_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_exp_linalg_vector_norm_mean_mul_neg_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp26 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp53 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = (rindex // 64)
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r0), rmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr0 + (1 + (4*r0)), rmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr0 + (2 + (4*r0)), rmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (3 + (4*r0)), rmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + (4*r0), rmask, eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr1 + (1 + (4*r0)), rmask, eviction_policy='evict_last', other=0.0)
tmp37 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr1 + (2 + (4*r0)), rmask, eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp43 = tl.load(in_ptr1 + (3 + (4*r0)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = -tmp20
tmp22 = 0.0625
tmp23 = tmp21 * tmp22
tmp24 = tl_math.exp(tmp23)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = _tmp26 + tmp25
_tmp26 = tl.where(rmask, tmp27, _tmp26)
tmp30 = tmp28 - tmp29
tmp31 = tmp30 * tmp30
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tmp31 + tmp35
tmp39 = tmp37 - tmp38
tmp40 = tmp39 * tmp39
tmp41 = tmp36 + tmp40
tmp44 = tmp42 - tmp43
tmp45 = tmp44 * tmp44
tmp46 = tmp41 + tmp45
tmp47 = libdevice.sqrt(tmp46)
tmp48 = tmp47 * tmp47
tmp49 = -tmp48
tmp50 = tmp49 * tmp22
tmp51 = tl_math.exp(tmp50)
tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK])
tmp54 = _tmp53 + tmp52
_tmp53 = tl.where(rmask, tmp54, _tmp53)
tmp55 = tmp0 - tmp29
tmp56 = tmp55 * tmp55
tmp57 = tmp4 - tmp33
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tmp9 - tmp38
tmp61 = tmp60 * tmp60
tmp62 = tmp59 + tmp61
tmp63 = tmp14 - tmp43
tmp64 = tmp63 * tmp63
tmp65 = tmp62 + tmp64
tmp66 = libdevice.sqrt(tmp65)
tmp67 = tmp66 * tmp66
tmp68 = -tmp67
tmp69 = tmp68 * tmp22
tmp70 = tl_math.exp(tmp69)
tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK])
tmp73 = _tmp72 + tmp71
_tmp72 = tl.where(rmask, tmp73, _tmp72)
tmp26 = tl.sum(_tmp26, 1)[:, None]
tmp53 = tl.sum(_tmp53, 1)[:, None]
tmp72 = tl.sum(_tmp72, 1)[:, None]
tmp74 = 4096.0
tmp75 = tmp26 / tmp74
tmp76 = tmp53 / tmp74
tmp77 = tmp75 + tmp76
tmp78 = tmp72 / tmp74
tmp79 = 2.0
tmp80 = tmp78 * tmp79
tmp81 = tmp77 - tmp80
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp81, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [aa, norm, pow_1, neg, div, exp, vaa, bb, norm_1, pow_2, neg_1, div_1, exp_1, vbb, add, ab, norm_2, pow_3, neg_2, div_2, exp_2, vab, mul, loss], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.pow, aten.neg, aten.div, aten.exp, aten.mean, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_red_fused_add_div_exp_linalg_vector_norm_mean_mul_neg_pow_sub_0.run(buf3, arg0_1, arg1_1, 1, 4096, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class Gaussian_Kernel_Function(nn.Module):
def __init__(self, std):
super(Gaussian_Kernel_Function, self).__init__()
self.sigma = std ** 2
def forward(self, fa, fb):
asize = fa.size()
bsize = fb.size()
fa1 = fa.view(-1, 1, asize[1])
fa2 = fa.view(1, -1, asize[1])
fb1 = fb.view(-1, 1, bsize[1])
fb2 = fb.view(1, -1, bsize[1])
        aa = fa1 - fa2
        vaa = torch.mean(torch.exp(torch.div(
            -torch.pow(torch.norm(aa, 2, dim=2), 2), self.sigma)))
        bb = fb1 - fb2
        vbb = torch.mean(torch.exp(torch.div(
            -torch.pow(torch.norm(bb, 2, dim=2), 2), self.sigma)))
        ab = fa1 - fb2
        vab = torch.mean(torch.exp(torch.div(
            -torch.pow(torch.norm(ab, 2, dim=2), 2), self.sigma)))
loss = vaa + vbb - 2.0 * vab
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'std': 4}]
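# Hedged note (added for illustration): the loss above is the biased estimate of
# squared MMD with a Gaussian kernel k(x, y) = exp(-||x - y||**2 / sigma), where
# sigma = std**2: mean k(a, a') + mean k(b, b') - 2 * mean k(a, b). A minimal
# eager sketch for flat (n, d) features, using torch.cdist:
def _mmd_sketch(fa, fb, sigma=16.0):
    vaa = torch.exp(-torch.cdist(fa, fa) ** 2 / sigma).mean()
    vbb = torch.exp(-torch.cdist(fb, fb) ** 2 / sigma).mean()
    vab = torch.exp(-torch.cdist(fa, fb) ** 2 / sigma).mean()
    return vaa + vbb - 2.0 * vab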
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_red_fused_add_div_exp_linalg_vector_norm_mean_mul_neg_pow_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # dead codegen artifact (unused)
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead codegen artifact (unused)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp26 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp53 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex // 64
r0 = rindex % 64
        tmp0 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy='evict_last', other=0.0)
        tmp1 = tl.load(in_ptr0 + 4 * r0, rmask, eviction_policy='evict_last', other=0.0)
        tmp4 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp5 = tl.load(in_ptr0 + (1 + 4 * r0), rmask, eviction_policy='evict_last', other=0.0)
        tmp9 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp10 = tl.load(in_ptr0 + (2 + 4 * r0), rmask, eviction_policy='evict_last', other=0.0)
        tmp14 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp15 = tl.load(in_ptr0 + (3 + 4 * r0), rmask, eviction_policy='evict_last', other=0.0)
        tmp28 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy='evict_last', other=0.0)
        tmp29 = tl.load(in_ptr1 + 4 * r0, rmask, eviction_policy='evict_last', other=0.0)
        tmp32 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp33 = tl.load(in_ptr1 + (1 + 4 * r0), rmask, eviction_policy='evict_last', other=0.0)
        tmp37 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp38 = tl.load(in_ptr1 + (2 + 4 * r0), rmask, eviction_policy='evict_last', other=0.0)
        tmp42 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp43 = tl.load(in_ptr1 + (3 + 4 * r0), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = -tmp20
tmp22 = 0.0625
tmp23 = tmp21 * tmp22
tmp24 = tl_math.exp(tmp23)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = _tmp26 + tmp25
_tmp26 = tl.where(rmask, tmp27, _tmp26)
tmp30 = tmp28 - tmp29
tmp31 = tmp30 * tmp30
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tmp31 + tmp35
tmp39 = tmp37 - tmp38
tmp40 = tmp39 * tmp39
tmp41 = tmp36 + tmp40
tmp44 = tmp42 - tmp43
tmp45 = tmp44 * tmp44
tmp46 = tmp41 + tmp45
tmp47 = libdevice.sqrt(tmp46)
tmp48 = tmp47 * tmp47
tmp49 = -tmp48
tmp50 = tmp49 * tmp22
tmp51 = tl_math.exp(tmp50)
tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK])
tmp54 = _tmp53 + tmp52
_tmp53 = tl.where(rmask, tmp54, _tmp53)
tmp55 = tmp0 - tmp29
tmp56 = tmp55 * tmp55
tmp57 = tmp4 - tmp33
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tmp9 - tmp38
tmp61 = tmp60 * tmp60
tmp62 = tmp59 + tmp61
tmp63 = tmp14 - tmp43
tmp64 = tmp63 * tmp63
tmp65 = tmp62 + tmp64
tmp66 = libdevice.sqrt(tmp65)
tmp67 = tmp66 * tmp66
tmp68 = -tmp67
tmp69 = tmp68 * tmp22
tmp70 = tl_math.exp(tmp69)
tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK])
tmp73 = _tmp72 + tmp71
_tmp72 = tl.where(rmask, tmp73, _tmp72)
tmp26 = tl.sum(_tmp26, 1)[:, None]
tmp53 = tl.sum(_tmp53, 1)[:, None]
tmp72 = tl.sum(_tmp72, 1)[:, None]
tmp74 = 4096.0
tmp75 = tmp26 / tmp74
tmp76 = tmp53 / tmp74
tmp77 = tmp75 + tmp76
tmp78 = tmp72 / tmp74
tmp79 = 2.0
tmp80 = tmp78 * tmp79
tmp81 = tmp77 - tmp80
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp81, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_red_fused_add_div_exp_linalg_vector_norm_mean_mul_neg_pow_sub_0[
grid(1)](buf3, arg0_1, arg1_1, 1, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class Gaussian_Kernel_FunctionNew(nn.Module):
def __init__(self, std):
super(Gaussian_Kernel_FunctionNew, self).__init__()
self.sigma = std ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
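# Hedged equivalence check (added for illustration, not generated code): the
# fused module should match an inline eager reference on the 4x4x4x4 CUDA
# inputs it was specialized for; the view(-1, 4) mirrors the (n, 1, d) and
# (1, n, d) reshapes in the original module.
def _check_gaussian_kernel(std=4):
    fa = torch.rand(4, 4, 4, 4, device='cuda')
    fb = torch.rand(4, 4, 4, 4, device='cuda')
    f1, f2 = fa.view(-1, 4), fb.view(-1, 4)
    sigma = std ** 2
    ref = (torch.exp(-torch.cdist(f1, f1) ** 2 / sigma).mean()
           + torch.exp(-torch.cdist(f2, f2) ** 2 / sigma).mean()
           - 2.0 * torch.exp(-torch.cdist(f1, f2) ** 2 / sigma).mean())
    fused = Gaussian_Kernel_FunctionNew(std)(fa, fb)
    assert torch.allclose(ref, fused, atol=1e-4)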
|
LOUEY233/Toward-Mutual-Information
|
Gaussian_Kernel_Function
| false | 9,229 |
[
"MIT"
] | 0 |
cde9ce5c9920bbc9c6e39dafb61ff1dd0c97772f
|
https://github.com/LOUEY233/Toward-Mutual-Information/tree/cde9ce5c9920bbc9c6e39dafb61ff1dd0c97772f
|
Gaussian_Distance
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/b7/cb7i4pf3kjpgs5yl6jdlrdjcmezn4u75kg4xt2hgdolfmosqezx4.py
# Topologically Sorted Source Nodes: [sub, pow_1, add, div, add_1, log, add_2, mul, exp_2, vaa, sub_2, pow_3, add_6, div_2, add_7, log_2, add_8, mul_2, exp_4, vbb, add_9, sub_1, pow_2, add_3, div_1, add_4, log_1, add_5, mul_1, exp_3, vab, mul_3, loss], Original ATen: [aten.sub, aten.pow, aten.add, aten.div, aten.log, aten.mul, aten.exp, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# add_7 => add_7
# add_8 => add_8
# add_9 => add_9
# div => div_2
# div_1 => div_3
# div_2 => div_4
# exp_2 => exp_2
# exp_3 => exp_3
# exp_4 => exp_4
# log => log
# log_1 => log_1
# log_2 => log_2
# loss => sub_3
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# vaa => sum_1
# vab => sum_2
# vbb => sum_3
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %view_3), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_1, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %view_3), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, -0.5), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%exp_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %view_5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %view_7), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_3, %add_6), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %view_7), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_7,), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_4, %log_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, -0.5), kwargs = {})
# %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%exp_4,), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_5), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %view_7), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %add_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %view_7), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_4,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_3, %log_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, -0.5), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%exp_3,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 2.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %mul_3), kwargs = {})
triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = (rindex // 256)
r3 = rindex % 256
tmp0 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (r3), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (r3), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp10 + tmp14
tmp16 = tmp6 / tmp15
tmp17 = tl_math.log(tmp15)
tmp18 = tmp16 + tmp17
tmp19 = -0.5
tmp20 = tmp18 * tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp26 = tmp25 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp26 - tmp28
tmp30 = tmp29 * tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 * tmp1
tmp34 = tmp33 * tmp1
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp36 * tmp1
tmp38 = tmp37 * tmp1
tmp39 = tmp34 + tmp38
tmp40 = tmp30 / tmp39
tmp41 = tl_math.log(tmp39)
tmp42 = tmp40 + tmp41
tmp43 = tmp42 * tmp19
tmp44 = tl_math.exp(tmp43)
tmp45 = tl.broadcast_to(tmp44, [RBLOCK])
tmp47 = triton_helpers.promote_to_tensor(tl.sum(tmp45, 0))
tmp48 = tmp2 - tmp28
tmp49 = tmp48 * tmp48
tmp50 = tmp10 + tmp38
tmp51 = tmp49 / tmp50
tmp52 = tl_math.log(tmp50)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp19
tmp55 = tl_math.exp(tmp54)
tmp56 = tl.broadcast_to(tmp55, [RBLOCK])
tmp58 = triton_helpers.promote_to_tensor(tl.sum(tmp56, 0))
tmp59 = tmp24 + tmp47
tmp60 = 2.0
tmp61 = tmp58 * tmp60
tmp62 = tmp59 - tmp61
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp62, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, add, div, add_1, log, add_2, mul, exp_2, vaa, sub_2, pow_3, add_6, div_2, add_7, log_2, add_8, mul_2, exp_4, vbb, add_9, sub_1, pow_2, add_3, div_1, add_4, log_1, add_5, mul_1, exp_3, vab, mul_3, loss], Original ATen: [aten.sub, aten.pow, aten.add, aten.div, aten.log, aten.mul, aten.exp, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0.run(buf3, arg0_1, arg2_1, arg1_1, arg3_1, 1, 1024, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class Gaussian_Distance(nn.Module):
def __init__(self, kern=1):
super(Gaussian_Distance, self).__init__()
self.kern = kern
self.avgpool = nn.AvgPool2d(kernel_size=kern, stride=kern)
def forward(self, mu_a, logvar_a, mu_b, logvar_b):
mu_a = self.avgpool(mu_a)
mu_b = self.avgpool(mu_b)
var_a = self.avgpool(torch.exp(logvar_a)) / (self.kern * self.kern)
var_b = self.avgpool(torch.exp(logvar_b)) / (self.kern * self.kern)
mu_a1 = mu_a.view(mu_a.size(0), 1, -1)
mu_a2 = mu_a.view(1, mu_a.size(0), -1)
var_a1 = var_a.view(var_a.size(0), 1, -1)
var_a2 = var_a.view(1, var_a.size(0), -1)
mu_b1 = mu_b.view(mu_b.size(0), 1, -1)
mu_b2 = mu_b.view(1, mu_b.size(0), -1)
var_b1 = var_b.view(var_b.size(0), 1, -1)
var_b2 = var_b.view(1, var_b.size(0), -1)
        vaa = torch.sum(torch.exp(-0.5 * (torch.pow(mu_a1 - mu_a2, 2) /
            (var_a1 + var_a2) + torch.log(var_a1 + var_a2))))
        vab = torch.sum(torch.exp(-0.5 * (torch.pow(mu_a1 - mu_b2, 2) /
            (var_a1 + var_b2) + torch.log(var_a1 + var_b2))))
        vbb = torch.sum(torch.exp(-0.5 * (torch.pow(mu_b1 - mu_b2, 2) /
            (var_b1 + var_b2) + torch.log(var_b1 + var_b2))))
loss = vaa + vbb - torch.mul(vab, 2.0)
return loss
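# Hedged note (added for illustration): per dimension, each summand equals
#   exp(-0.5 * ((mu1 - mu2)**2 / (v1 + v2) + log(v1 + v2)))
#     = sqrt(2 * pi) * integral of N(x; mu1, v1) * N(x; mu2, v2) dx,
# i.e. a scaled overlap of two 1-D Gaussians, so the loss is an MMD-style
# distance between the two sets of diagonal Gaussians. A minimal sketch for
# already-flattened (n, d) means and variances:
def _gaussian_distance_sketch(mu_a, var_a, mu_b, var_b):
    def overlap(m1, v1, m2, v2):
        s = v1[:, None, :] + v2[None, :, :]
        d2 = (m1[:, None, :] - m2[None, :, :]) ** 2
        return torch.exp(-0.5 * (d2 / s + torch.log(s))).sum()
    return (overlap(mu_a, var_a, mu_a, var_a)
            + overlap(mu_b, var_b, mu_b, var_b)
            - 2.0 * overlap(mu_a, var_a, mu_b, var_b))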
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)  # dead codegen artifact (unused)
    tl.full([RBLOCK], True, tl.int1)  # dead codegen artifact (unused)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)  # dead codegen artifact (unused)
r0 = rindex % 64
r2 = rindex // 256
r3 = rindex % 256
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + r3, None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr3 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr3 + r3, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp10 + tmp14
tmp16 = tmp6 / tmp15
tmp17 = tl_math.log(tmp15)
tmp18 = tmp16 + tmp17
tmp19 = -0.5
tmp20 = tmp18 * tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp26 = tmp25 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp26 - tmp28
tmp30 = tmp29 * tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 * tmp1
tmp34 = tmp33 * tmp1
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp36 * tmp1
tmp38 = tmp37 * tmp1
tmp39 = tmp34 + tmp38
tmp40 = tmp30 / tmp39
tmp41 = tl_math.log(tmp39)
tmp42 = tmp40 + tmp41
tmp43 = tmp42 * tmp19
tmp44 = tl_math.exp(tmp43)
tmp45 = tl.broadcast_to(tmp44, [RBLOCK])
tmp47 = triton_helpers.promote_to_tensor(tl.sum(tmp45, 0))
tmp48 = tmp2 - tmp28
tmp49 = tmp48 * tmp48
tmp50 = tmp10 + tmp38
tmp51 = tmp49 / tmp50
tmp52 = tl_math.log(tmp50)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp19
tmp55 = tl_math.exp(tmp54)
tmp56 = tl.broadcast_to(tmp55, [RBLOCK])
tmp58 = triton_helpers.promote_to_tensor(tl.sum(tmp56, 0))
tmp59 = tmp24 + tmp47
tmp60 = 2.0
tmp61 = tmp58 * tmp60
tmp62 = tmp59 - tmp61
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp62, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0[grid(1)](buf3,
arg0_1, arg2_1, arg1_1, arg3_1, 1, 1024, num_warps=8, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf3,
class Gaussian_DistanceNew(nn.Module):
def __init__(self, kern=1):
super(Gaussian_DistanceNew, self).__init__()
self.kern = kern
self.avgpool = nn.AvgPool2d(kernel_size=kern, stride=kern)
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
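# Hedged caveat (added note, not generated code): this wrapper is only valid for
# the traced configuration: kern=1, where the AvgPool2d is the identity and the
# 1/(kern*kern) factors survive as the literal 1.0 multiplies (tmp1 above), and
# 4x4x4x4 float32 CUDA inputs, since RBLOCK is hard-coded to the flattened size.
# A defensive subclass could refuse anything else:
class _GuardedGaussianDistance(Gaussian_DistanceNew):
    def forward(self, *tensors):
        assert self.kern == 1, 'fused kernel was specialized for kern=1'
        assert all(t.shape == (4, 4, 4, 4) for t in tensors), 'shape-specialized'
        return super().forward(*tensors)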
|
LOUEY233/Toward-Mutual-Information
|
Gaussian_Distance
| false | 9,230 |
[
"MIT"
] | 0 |
cde9ce5c9920bbc9c6e39dafb61ff1dd0c97772f
|
https://github.com/LOUEY233/Toward-Mutual-Information/tree/cde9ce5c9920bbc9c6e39dafb61ff1dd0c97772f
|
Gram_StyleLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/lh/clhfjkkr7diebd2mxdfts32t7tl4bok6jmervpgm2asjty6pcsix.py
# Topologically Sorted Source Nodes: [G_1, T, mse_loss, value_1, G_4, T_1, mse_loss_1, value_2, G_7, T_2, mse_loss_2, value_3, G_10, T_3, mse_loss_3, value_4], Original ATen: [aten.div, aten.mse_loss, aten.add]
# Source node to ATen node mapping:
# G_1 => div
# G_10 => div_6
# G_4 => div_2
# G_7 => div_4
# T => div_1
# T_1 => div_3
# T_2 => div_5
# T_3 => div_7
# mse_loss => pow_1, sub
# mse_loss_1 => mean_1, pow_2, sub_1
# mse_loss_2 => mean_2, pow_3, sub_2
# mse_loss_3 => mean_3, pow_4, sub_3
# value_1 => mean
# value_2 => add_1
# value_3 => add_2
# value_4 => add_3
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, 256), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_1, 256), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_2, 256), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_3, 256), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_2, %div_3), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_4, 256), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_5, 256), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_4, %div_5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mean_2), kwargs = {})
# %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_6, 256), kwargs = {})
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_7, 256), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_6, %div_7), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_4,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mean_3), kwargs = {})
triton_per_fused_add_div_mse_loss_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {9: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 10), equal_to_1=(9,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr2 + (r0), None)
tmp12 = tl.load(in_ptr3 + (r0), None)
tmp19 = tl.load(in_ptr4 + (r0), None)
tmp21 = tl.load(in_ptr5 + (r0), None)
tmp28 = tl.load(in_ptr6 + (r0), None)
tmp30 = tl.load(in_ptr7 + (r0), None)
tmp1 = 0.00390625
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp11 = tmp10 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp20 = tmp19 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [RBLOCK])
tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0))
tmp29 = tmp28 * tmp1
tmp31 = tmp30 * tmp1
tmp32 = tmp29 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0))
tmp37 = 256.0
tmp38 = tmp9 / tmp37
tmp39 = tmp18 / tmp37
tmp40 = tmp38 + tmp39
tmp41 = tmp27 / tmp37
tmp42 = tmp40 + tmp41
tmp43 = tmp36 / tmp37
tmp44 = tmp42 + tmp43
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp44, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 0), reinterpret_tensor(arg0_1, (16, 16), (1, 16), 0), out=buf0)
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 0), reinterpret_tensor(arg1_1, (16, 16), (1, 16), 0), out=buf1)
buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G_11], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 768), reinterpret_tensor(arg1_1, (16, 16), (1, 16), 768), out=buf10)
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 256), reinterpret_tensor(arg0_1, (16, 16), (1, 16), 256), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G_5], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 256), reinterpret_tensor(arg1_1, (16, 16), (1, 16), 256), out=buf4)
buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 512), reinterpret_tensor(arg0_1, (16, 16), (1, 16), 512), out=buf6)
buf7 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G_8], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 512), reinterpret_tensor(arg1_1, (16, 16), (1, 16), 512), out=buf7)
del arg1_1
buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [G_9], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 768), reinterpret_tensor(arg0_1, (16, 16), (1, 16), 768), out=buf9)
del arg0_1
buf11 = empty_strided_cuda((), (), torch.float32)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [G_1, T, mse_loss, value_1, G_4, T_1, mse_loss_1, value_2, G_7, T_2, mse_loss_2, value_3, G_10, T_3, mse_loss_3, value_4], Original ATen: [aten.div, aten.mse_loss, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_0.run(buf12, buf0, buf1, buf3, buf4, buf6, buf7, buf9, buf10, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf10
del buf3
del buf4
del buf6
del buf7
del buf9
return (buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
import torch.nn as nn
import torch.nn.functional as F
def gram_matrix(input):
a, b, c, d = input.size()
features = input.view(a * b, c * d)
G = torch.mm(features, features.t())
return G / (a * b * c * d)
class Gram_StyleLoss(nn.Module):
def __init__(self):
super(Gram_StyleLoss, self).__init__()
def forward(self, input, target):
value = torch.tensor(0.0).type_as(input[0])
for in_m, in_n in zip(input, target):
G = gram_matrix(in_m)
T = gram_matrix(in_n)
value += F.mse_loss(G, T)
return value
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
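    # Note (added comment): the bare tl.full(...) expressions above have unused
    # results; with a single-program grid (xnumel == 1) no offsets or masks are
    # actually needed.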
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp12 = tl.load(in_ptr3 + r0, None)
tmp19 = tl.load(in_ptr4 + r0, None)
tmp21 = tl.load(in_ptr5 + r0, None)
tmp28 = tl.load(in_ptr6 + r0, None)
tmp30 = tl.load(in_ptr7 + r0, None)
tmp1 = 0.00390625
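    # 0.00390625 == 1/256 == 1/(a*b*c*d) for the (4, 4, 4, 4) per-layer blocks:
    # gram_matrix's normalization is folded in before the squared difference.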
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp11 = tmp10 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp20 = tmp19 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [RBLOCK])
tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0))
tmp29 = tmp28 * tmp1
tmp31 = tmp30 * tmp1
tmp32 = tmp29 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0))
tmp37 = 256.0
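    # mse_loss defaults to mean reduction; each 16x16 gram matrix has 256
    # entries, hence the division by 256.0.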
tmp38 = tmp9 / tmp37
tmp39 = tmp18 / tmp37
tmp40 = tmp38 + tmp39
tmp41 = tmp27 / tmp37
tmp42 = tmp40 + tmp41
tmp43 = tmp36 / tmp37
tmp44 = tmp42 + tmp43
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp44, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 0),
reinterpret_tensor(arg0_1, (16, 16), (1, 16), 0), out=buf0)
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 0),
reinterpret_tensor(arg1_1, (16, 16), (1, 16), 0), out=buf1)
buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 768
), reinterpret_tensor(arg1_1, (16, 16), (1, 16), 768), out=buf10)
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 256
), reinterpret_tensor(arg0_1, (16, 16), (1, 16), 256), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 256
), reinterpret_tensor(arg1_1, (16, 16), (1, 16), 256), out=buf4)
buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 512
), reinterpret_tensor(arg0_1, (16, 16), (1, 16), 512), out=buf6)
buf7 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg1_1, (16, 16), (16, 1), 512
), reinterpret_tensor(arg1_1, (16, 16), (1, 16), 512), out=buf7)
del arg1_1
buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (16, 16), (16, 1), 768
), reinterpret_tensor(arg0_1, (16, 16), (1, 16), 768), out=buf9)
del arg0_1
buf11 = empty_strided_cuda((), (), torch.float32)
buf12 = buf11
del buf11
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_0[grid(1)](buf12, buf0, buf1,
buf3, buf4, buf6, buf7, buf9, buf10, 1, 256, num_warps=2,
num_stages=1)
del buf0
del buf1
del buf10
del buf3
del buf4
del buf6
del buf7
del buf9
return buf12,
def gram_matrix(input):
a, b, c, d = input.size()
features = input.view(a * b, c * d)
G = torch.mm(features, features.t())
return G / (a * b * c * d)
class Gram_StyleLossNew(nn.Module):
def __init__(self):
super(Gram_StyleLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
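# Illustrative check, not part of the original repo (assumes a CUDA device,
# since call() launches Triton kernels): the compiled module should match a
# plain eager computation of the same per-layer gram-matrix MSE sum.
def _check_gram_style_loss():
    x = torch.rand(4, 4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, 4, device='cuda')
    eager = sum(torch.nn.functional.mse_loss(gram_matrix(a), gram_matrix(b))
                for a, b in zip(x, y))
    compiled = Gram_StyleLossNew()(x, y)
    assert torch.allclose(eager, compiled, atol=1e-5)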
repo_name: Holmes-Alan/TxST
module_name: Gram_StyleLoss
synthetic: false
uuid: 9231
licenses: ["MIT"]
stars: 0
sha: c5b59a12bbb9e62244c3b608581d5cb9606525e0
repo_link: https://github.com/Holmes-Alan/TxST/tree/c5b59a12bbb9e62244c3b608581d5cb9606525e0
entry_point: GLU
original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6v/c6vzcw3gyn5uqhyxbbwmpum2zzhvhs66tjq2oznzcap5zo7izpvb.py
# Topologically Sorted Source Nodes: [gate, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# gate => sigmoid
# mul => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_3), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate, mul], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
from torch import nn
class GLU(nn.Module):
"""
The Gated Linear Unit GLU(a,b) = mult(a,sigmoid(b)) is common in NLP
architectures like the Gated CNN. Here sigmoid(b) corresponds to a gate
that controls what information from a is passed to the following layer.
Args:
        input_size (int): the shared input and output size of the gate
"""
def __init__(self, input_size):
super().__init__()
self.a = nn.Linear(input_size, input_size)
self.sigmoid = nn.Sigmoid()
self.b = nn.Linear(input_size, input_size)
def forward(self, x):
"""
Args:
x (torch.tensor): tensor passing through the gate
"""
gate = self.sigmoid(self.b(x))
x = self.a(x)
return torch.mul(gate, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
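    # Fused gate: the sigmoid of one linear projection scales the other,
    # elementwise, in a single pass over the 256 flattened values.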
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1
class GLUNew(nn.Module):
"""
The Gated Linear Unit GLU(a,b) = mult(a,sigmoid(b)) is common in NLP
architectures like the Gated CNN. Here sigmoid(b) corresponds to a gate
that controls what information from a is passed to the following layer.
Args:
        input_size (int): the shared input and output size of the gate
"""
def __init__(self, input_size):
super().__init__()
self.a = nn.Linear(input_size, input_size)
self.sigmoid = nn.Sigmoid()
self.b = nn.Linear(input_size, input_size)
def forward(self, input_0):
primals_1 = self.a.weight
primals_2 = self.a.bias
primals_4 = self.b.weight
primals_5 = self.b.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
repo_name: KalleBylin/tft_webapp
module_name: GLU
synthetic: false
uuid: 9232
licenses: ["Apache-2.0"]
stars: 0
sha: 008f109e77f8bada417655dab482f340adb8cb6b
repo_link: https://github.com/KalleBylin/tft_webapp/tree/008f109e77f8bada417655dab482f340adb8cb6b
entry_point: QuickGELU
original_triton_code:
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/vb/cvbzgl2kevabwrh6e5ay2dasnq6gi3a7zeveknxks7jkbuq64ime.py
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.702), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
import torch.nn as nn
class QuickGELU(nn.Module):
def forward(self, x: 'torch.Tensor'):
return x * torch.sigmoid(1.702 * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
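# Illustrative comparison, not part of the original repo: x * sigmoid(1.702*x)
# is a cheap approximation of exact GELU, accurate to a few hundredths.
def _compare_quick_gelu():
    x = torch.linspace(-3.0, 3.0, steps=601)
    quick = QuickGELU()(x)
    exact = torch.nn.functional.gelu(x)
    return (quick - exact).abs().max()  # small, on the order of 1e-2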
triton_code:
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.702
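    # 1.702 is the standard constant for the sigmoid approximation of GELU.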
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class QuickGELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
repo_name: Holmes-Alan/TxST
module_name: QuickGELU
synthetic: false
uuid: 9233
licenses: ["MIT"]
stars: 0
sha: c5b59a12bbb9e62244c3b608581d5cb9606525e0
repo_link: https://github.com/Holmes-Alan/TxST/tree/c5b59a12bbb9e62244c3b608581d5cb9606525e0
entry_point: ScalarMix
original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/2d/c2durcbnmxziv6ccmw4s5yb6zlayyf7itflrryxft2ddkko7o6q4.py
# Topologically Sorted Source Nodes: [mul, weighted_sum, mul_1], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# weighted_sum => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %add), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tmp8 + tmp9
tmp11 = tmp1 * tmp10
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, weighted_sum, mul_1], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_3, primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0)
return (buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
import torch.nn as nn
class ScalarMix(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers: 'int', dropout: 'float'=0.0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, tensors):
"""
Args:
tensors (list[~torch.Tensor]):
:math:`N` tensors to be mixed.
Returns:
The mixture of :math:`N` tensors.
"""
normed_weights = self.dropout(self.weights.softmax(-1))
weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
return self.gamma * weighted_sum
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1}]
triton_code:
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
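    # tmp3 - tmp3 == 0 because softmax over a single weight normalizes it to
    # exp(0)/exp(0) == 1; the arithmetic below therefore multiplies by 1.0.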
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tmp8 + tmp9
tmp11 = tmp1 * tmp10
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(64)](primals_3, primals_1,
primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4,
4), (16, 4, 1), 0)
class ScalarMixNew(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers: 'int', dropout: 'float'=0.0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, input_0):
primals_1 = self.weights
primals_3 = self.gamma
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
repo_name: KoichiYasuoka/diaparser
module_name: ScalarMix
synthetic: false
uuid: 9234
licenses: ["MIT"]
stars: 0
sha: ca11e65ef890cee2fbb23f42ae9c711c89767158
repo_link: https://github.com/KoichiYasuoka/diaparser/tree/ca11e65ef890cee2fbb23f42ae9c711c89767158
entry_point: Net
original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zt/czt2ssjrvqoi47t3l7sgvnwmsl2z44ssifalp5lelhoaqit7qkjk.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
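    # Repack (added comment, layout inferred from the indexing): each
    # contiguous (32, 25) weight slab is read row-major and stored as
    # (25, 32), making the 32-channel index innermost for the convolution.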
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/jq/cjqcnpaa3l4qbgseb6g76lnbktmrwvyhksgxwgikrhznwn7uoa6u.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/cr/ccr7aze7brdcpnn4nu5hjbdstc7hnr3bi2oml3ziovcyrsa7cgcm.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 3600
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (3600*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + (32*x2) + (115200*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ou/couxgazdxyypsu4pknhlzgo4nfrm7zwjpanc5j4d76s67ib3oc7g.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32) % 30
x2 = (xindex // 960)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (3840*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (3840*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (1920 + x0 + (64*x1) + (3840*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (1952 + x0 + (64*x1) + (3840*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
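    # tmp6 accumulates the 2x2 window max; tmp16 records the argmax position
    # (0..3) as int8, the compact index format of max_pool2d_with_indices.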
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xd/cxdhbeokib5mzr2h7i5dr6ubl6v3adremdmlwynoj33boj4kttek.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 173056
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/oh/cohdx5kwtu7pghrblcvw34rv3obasxci62ilfyn3jxgnavzrcv7y.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 13
x2 = (xindex // 832)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (3328*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (3328*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (1664 + x0 + (128*x1) + (3328*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (1728 + x0 + (128*x1) + (3328*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/s3/cs3s4xnrmhybx636lujzgofloctrqylufum5bslufeoqggo2wftn.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 41472
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/a7/ca7ynzcdj3fioare7gwform27bqwqz6ywzqusx3676jejh754aux.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 4
y1 = (yindex // 4) % 4
y2 = (yindex // 16)
y4 = yindex
y5 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x3 + (256*y0) + (2304*y1) + (10368*y2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x3 + (256*y0) + (2304*y1) + (10368*y2)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1152 + x3 + (256*y0) + (2304*y1) + (10368*y2)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1280 + x3 + (256*y0) + (2304*y1) + (10368*y2)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + (128*y4)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y5 + (16*x3) + (2048*y2)), tmp16, xmask & ymask)
''', device_str='cuda')
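# Note on triton_poi_fused_max_pool2d_with_indices_7: instead of flat indices,
# the int8 output stores a code in {0, 1, 2, 3} naming which element of each
# 2x2 window won, in the order the four candidates are loaded above. A hedged
# eager counterpart (illustration only; `x` stands for the (4, 128, 9, 9) ReLU
# output feeding this kernel):
#
#   vals, idx = torch.nn.functional.max_pool2d(x, 2, 2, return_indices=True)
#
# where idx holds flat positions rather than this compact 0-3 encoding.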
# kernel path: runs/run_shard_8/inductor_cache/2w/c2wd4y4pyj62lko4eis4jipwvem6wsxnsduqyviejxhn45b5lipb.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/gt/cgtej2iywqsjair6bfzlewn45p3vbbfj2zl3fpqhzns6uc776htl.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_9 = async_compile.triton('triton_poi_fused__softmax_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (512, 512), (512, 1))
assert_size_stride(primals_9, (512, ), (1, ))
assert_size_stride(primals_10, (2, 512), (512, 1))
assert_size_stride(primals_11, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 2048, 25, grid=grid(2048, 25), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf3 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf2, primals_2, buf3, 128, 3600, grid=grid(128, 3600), stream=stream0)
del buf2
del primals_2
buf4 = empty_strided_cuda((4, 32, 30, 30), (28800, 1, 960, 32), torch.float32)
buf5 = empty_strided_cuda((4, 32, 30, 30), (28800, 1, 960, 32), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf3, buf4, buf5, 115200, grid=grid(115200), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 26, 26), (43264, 1, 1664, 64))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf7, primals_5, 173056, grid=grid(173056), stream=stream0)
del primals_5
buf8 = empty_strided_cuda((4, 64, 13, 13), (10816, 1, 832, 64), torch.float32)
buf9 = empty_strided_cuda((4, 64, 13, 13), (10816, 1, 832, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf7, buf8, buf9, 43264, grid=grid(43264), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 9, 9), (10368, 1, 1152, 128))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf11, primals_7, 41472, grid=grid(41472), stream=stream0)
del primals_7
buf12 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.int8)
buf13 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf11, buf12, buf13, 64, 128, grid=grid(64, 128), stream=stream0)
buf14 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf13, (16, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 512), (1, 512), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_8.run(buf15, primals_9, 8192, grid=grid(8192), stream=stream0)
del primals_9
buf16 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf15, reinterpret_tensor(primals_10, (512, 2), (1, 512), 0), alpha=1, beta=1, out=buf16)
del primals_11
buf17 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_9.run(buf16, buf17, 32, grid=grid(32), stream=stream0)
del buf16
return (buf17, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7, buf8, buf9, buf11, buf12, reinterpret_tensor(buf13, (16, 512), (512, 1), 0), buf15, buf17, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((512, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv2 = nn.Conv2d(32, 64, 5)
self.conv3 = nn.Conv2d(64, 128, 5)
x = torch.randn(50, 50).view(-1, 1, 50, 50)
self._to_linear = None
self.convs(x)
self.fc1 = nn.Linear(self._to_linear, 512)
self.fc2 = nn.Linear(512, 2)
def convs(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self._to_linear is None:
self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.convs(x)
x = x.view(-1, self._to_linear)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
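# Shape sketch for the (4, 1, 64, 64) inputs above (an illustration, not part
# of the original module): each 5x5 conv shrinks H/W by 4 and each pool halves
# them with flooring, so 64 -> 60 -> 30 -> 26 -> 13 -> 9 -> 4. The flattened
# features are 4 * 128 * 4 * 4 = 8192 values, which x.view(-1, self._to_linear)
# regroups into 16 rows of 512, since _to_linear was fixed at 512 by the 50x50
# probe in __init__ (50 -> 46 -> 23 -> 19 -> 9 -> 5 -> 2; 128 * 2 * 2 = 512).
# That is why the compiled graph works on (16, 512) and returns a (16, 2)
# softmax.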
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y-mask is statically all-true; value unused
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 3600
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 3600 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 32 * x2 + 115200 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32 % 30
x2 = xindex // 960
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 3840 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 3840 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1920 + x0 + 64 * x1 + 3840 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (1952 + x0 + 64 * x1 + 3840 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 173056
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 13
x2 = xindex // 832
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3328 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3328 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1664 + x0 + 128 * x1 + 3328 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (1728 + x0 + 128 * x1 + 3328 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 41472
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 4
y1 = yindex // 4 % 4
y2 = yindex // 16
y4 = yindex
y5 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x3 + 256 * y0 + 2304 * y1 + 10368 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x3 + 256 * y0 + 2304 * y1 + 10368 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1152 + x3 + 256 * y0 + 2304 * y1 + 10368 * y2
), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1280 + x3 + 256 * y0 + 2304 * y1 + 10368 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + 128 * y4), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y5 + 16 * x3 + 2048 * y2), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # mask is statically all-true; loads/stores below pass None
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
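def _softmax_reference_sketch(logits):
    """Hedged eager-mode sketch of triton_poi_fused__softmax_9 (illustration
    only, not part of the generated module; assumes 2-D logits such as the
    (16, 2) buf16 below). Like the kernel, it subtracts the row max before
    exp() for numerical stability."""
    m = logits.max(dim=1, keepdim=True).values
    e = (logits - m).exp()
    return e / e.sum(dim=1, keepdim=True)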
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (512, 512), (512, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (2, 512), (512, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
        get_raw_stream(0)  # stream handle unused; kernels launch on the current stream
triton_poi_fused_0[grid(2048, 25)](primals_4, buf0, 2048, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf3 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(128, 3600)](buf2,
primals_2, buf3, 128, 3600, XBLOCK=8, YBLOCK=128, num_warps=4,
num_stages=1)
del buf2
del primals_2
buf4 = empty_strided_cuda((4, 32, 30, 30), (28800, 1, 960, 32),
torch.float32)
buf5 = empty_strided_cuda((4, 32, 30, 30), (28800, 1, 960, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(115200)](buf3, buf4,
buf5, 115200, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 26, 26), (43264, 1, 1664, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(173056)](buf7, primals_5,
173056, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf8 = empty_strided_cuda((4, 64, 13, 13), (10816, 1, 832, 64),
torch.float32)
buf9 = empty_strided_cuda((4, 64, 13, 13), (10816, 1, 832, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(43264)](buf7, buf8,
buf9, 43264, XBLOCK=512, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 9, 9), (10368, 1, 1152, 128))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_6[grid(41472)](buf11, primals_7,
41472, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.int8)
        buf13 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_7[grid(64, 128)](buf11,
buf12, buf13, 64, 128, XBLOCK=4, YBLOCK=64, num_warps=4,
num_stages=1)
buf14 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (16, 512), (512, 1), 0),
reinterpret_tensor(primals_8, (512, 512), (1, 512), 0), out=buf14)
buf15 = buf14
del buf14
        triton_poi_fused_relu_8[grid(8192)](buf15, primals_9, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf16 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_11, buf15, reinterpret_tensor(
primals_10, (512, 2), (1, 512), 0), alpha=1, beta=1, out=buf16)
del primals_11
buf17 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
triton_poi_fused__softmax_9[grid(32)](buf16, buf17, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del buf16
return (buf17, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7,
buf8, buf9, buf11, buf12, reinterpret_tensor(buf13, (16, 512), (512,
1), 0), buf15, buf17, primals_10, primals_8)
class NetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv2 = nn.Conv2d(32, 64, 5)
self.conv3 = nn.Conv2d(64, 128, 5)
x = torch.randn(50, 50).view(-1, 1, 50, 50)
self._to_linear = None
self.convs(x)
self.fc1 = nn.Linear(self._to_linear, 512)
self.fc2 = nn.Linear(512, 2)
def convs(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self._to_linear is None:
self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
JSONLewis/TOHM
|
Net
| false | 9,235 |
[
"MIT"
] | 0 |
ba40fdfe0a1c515aca7f57de030bdc02a7d0951e
|
https://github.com/JSONLewis/TOHM/tree/ba40fdfe0a1c515aca7f57de030bdc02a7d0951e
|
UnfoldTemporalWindows
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ba/cbadtthtiiy3yjdtdz7ewkcelxcidbd5725ebc4pboabmozfynt5.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 3
x3 = (xindex // 48)
x4 = xindex % 16
x5 = xindex
tmp0 = (-1) + x1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-4) + x4 + (4*x2) + (16*x3)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x5), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 4, 4), (192, 48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 768, grid=grid(768), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4, 3, 16), (192, 48, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class UnfoldTemporalWindows(nn.Module):
def __init__(self, window_size, window_stride, window_dilation=1):
super().__init__()
self.window_size = window_size
self.window_stride = window_stride
self.window_dilation = window_dilation
        self.padding = (window_size + (window_size - 1) * (window_dilation - 1) - 1) // 2
        self.unfold = nn.Unfold(kernel_size=(self.window_size, 1),
            dilation=(self.window_dilation, 1), stride=(self.window_stride, 1),
            padding=(self.padding, 0))
def forward(self, x):
N, C, _T, V = x.shape
x = self.unfold(x)
        x = x.view(N, C, self.window_size, -1, V).permute(0, 1, 3, 2, 4).contiguous()
x = x.view(N, C, -1, self.window_size * V)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'window_size': 4, 'window_stride': 1}]
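# Worked shapes for the defaults above (illustration): with window_size=4 and
# dilation=1, padding = (4 - 1) // 2 = 1, so a (4, 4, 4, 4) input yields
# (4 + 2*1 - 4) + 1 = 3 window positions along T. The unfold/permute/view
# pipeline therefore returns (N, C, 3, window_size * V) = (4, 4, 3, 16),
# matching the reinterpret_tensor call in the compiled graph.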
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16 % 3
x3 = xindex // 48
x4 = xindex % 16
x5 = xindex
tmp0 = -1 + x1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x4 + 4 * x2 + 16 * x3), tmp5 & xmask,
other=0.0)
tl.store(out_ptr0 + x5, tmp6, xmask)
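# Index sketch for triton_poi_fused_clone_0 (illustration): x3 enumerates the
# (N, C) pairs, x2 the 3 window positions, x1 the 4 offsets inside a window,
# and x4 the combined (offset, V) pair. tmp0 = -1 + x1 + x2 is the source
# frame t = window_start - padding + offset; frames outside [0, 4) are
# zero-filled via the tmp5 mask, reproducing nn.Unfold's implicit padding.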
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 4, 4), (192, 48, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(768)](arg0_1, buf0, 768, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 3, 16), (192, 48, 16, 1), 0),
class UnfoldTemporalWindowsNew(nn.Module):
def __init__(self, window_size, window_stride, window_dilation=1):
super().__init__()
self.window_size = window_size
self.window_stride = window_stride
self.window_dilation = window_dilation
        self.padding = (window_size + (window_size - 1) * (window_dilation - 1) - 1) // 2
        self.unfold = nn.Unfold(kernel_size=(self.window_size, 1),
            dilation=(self.window_dilation, 1), stride=(self.window_stride, 1),
            padding=(self.padding, 0))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
IW276/IW276SS20P7
|
UnfoldTemporalWindows
| false | 9,236 |
[
"MIT"
] | 0 |
ed388c04eb8d5ea1d13b5ed4119e722552794a62
|
https://github.com/IW276/IW276SS20P7/tree/ed388c04eb8d5ea1d13b5ed4119e722552794a62
|
CrossAttN_v8
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# G => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_6, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [S_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S_1 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
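# This persistent-reduction softmax keeps each 16-element row in registers:
# triton_helpers.max2 and tl.sum reduce along the RBLOCK axis, so every one of
# the 64 rows of the (4, 16, 16) attention scores is normalized in a single
# pass. A hedged eager equivalent of what it computes (illustration only;
# `scores` stands for the bmm output buf6):
#
#   probs = torch.softmax(scores, dim=-1)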
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [F], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [H], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_6, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [H], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf4, primals_8, 256, grid=grid(256), stream=stream0)
del primals_8
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [F], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf5, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [S], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0), out=buf6)
buf9 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [S_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf6, buf9, 64, 16, grid=grid(64), stream=stream0)
del buf6
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf9, (4, 16, 16), (256, 1, 16), 0), out=buf10)
# Topologically Sorted Source Nodes: [result_2], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [result_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf12, primals_10, 256, grid=grid(256), stream=stream0)
del primals_10
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_7, primals_9, buf9, reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as Func
class CrossAttN_v8(nn.Module):
def __init__(self, in_planes, clip_dim):
super(CrossAttN_v8, self).__init__()
self.f = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
self.g = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
self.h = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
self.output = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
return
def forward(self, F_c, F_s):
b, _c = F_c.shape[0], F_c.shape[1]
F = self.f(F_c)
F = F.view(b, F.shape[1], -1)
G = self.g(F_s)
G = G.view(b, G.shape[1], -1)
H = self.h(F_s)
H = H.view(b, H.shape[1], -1)
S = torch.bmm(F.permute(0, 2, 1), G)
S = Func.softmax(S, dim=-1)
result = torch.bmm(H, S.permute(0, 2, 1))
result = result.view(b, result.shape[1], F_c.shape[2], F_c.shape[3])
result = self.output(result)
return result
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'clip_dim': 4}]
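# Attention-shape sketch for the 4x4x4x4 inputs above (illustration): f, g, h
# are 1x1 convs, so F, G, H all flatten to (b, C, HW) = (4, 4, 16). Then
# S = softmax(F^T @ G) is (4, 16, 16) and result = H @ S^T is (4, 4, 16),
# reshaped back to (4, 4, 4, 4) before the final 1x1 output conv; this is
# exactly the bmm/softmax/bmm sequence in the compiled call() above.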
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # r-mask is statically all-true; value unused
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf2, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(primals_6, primals_7, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_0[grid(256)](buf4, primals_8, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
buf5 = buf0
del buf0
triton_poi_fused_convolution_0[grid(256)](buf5, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16),
0), reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0), out=buf6)
buf9 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf6, buf9, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf6
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1),
            0), reinterpret_tensor(buf9, (4, 16, 16), (256, 1, 16), 0),
            out=buf10)
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4,
4, 4), (64, 16, 4, 1), 0), primals_9, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_0[grid(256)](buf12, primals_10, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_10
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_7,
primals_9, buf9, reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4,
1), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0))
class CrossAttN_v8New(nn.Module):
def __init__(self, in_planes, clip_dim):
super(CrossAttN_v8New, self).__init__()
self.f = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
self.g = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
self.h = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
self.output = nn.Conv2d(in_planes, in_planes, 1, 1, 0)
return
def forward(self, input_0, input_1):
primals_2 = self.f.weight
primals_3 = self.f.bias
primals_4 = self.g.weight
primals_5 = self.g.bias
primals_7 = self.h.weight
primals_8 = self.h.bias
primals_9 = self.output.weight
primals_10 = self.output.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
Holmes-Alan/TxST
|
CrossAttN_v8
| false | 9,237 |
[
"MIT"
] | 0 |
c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
https://github.com/Holmes-Alan/TxST/tree/c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
MultConst
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5l/c5lja42ucynhzaul7xv7gbc5hgzxyccxj7u735j75mxw7o725pey.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 255), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 255.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class MultConst(nn.Module):
def forward(self, input):
return 255 * input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
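# Pointwise kernel fusing the single `mul` op: each lane loads one fp32
# element, multiplies it by 255.0, and stores the result.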
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 255.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
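# Entry point: checks the input's shape/strides, allocates the output buffer
# on the current CUDA device, and launches the fused kernel over all 256 elements.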
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MultConstNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JonghunBok/PyTorch-Multi-Style-Transfer
|
MultConst
| false | 9,238 |
[
"MIT"
] | 0 |
0e6744eb7d9c746ba828fc406e59d619f2e60094
|
https://github.com/JonghunBok/PyTorch-Multi-Style-Transfer/tree/0e6744eb7d9c746ba828fc406e59d619f2e60094
|
AttentionHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/rh/crha2kaczuxfca6yx2xil6kixynsv3nhmkywkesbyawp6j7nl6x5.py
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# att => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/dm/cdmkcxuzpnailvibeivaikqdr4zvashgzwju7qijhq5aizlo3aor.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kt/cktghousutx6xui2sl2rvevzmb7gkacvfhntjq5n2xzeu7v57oz6.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/l3/cl3uiy4xloo7vbigx7zdxlvpfddyykkm3atlgvpj6v3rfyqkx3qh.py
# Topologically Sorted Source Nodes: [attention_weights, context_vector, context_vector_1], Original ATen: [aten._softmax, aten.mul, aten.sum]
# Source node to ATen node mapping:
# attention_weights => div, sum_1
# context_vector => mul
# context_vector_1 => sum_2
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_3), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused__softmax_mul_sum_3 = async_compile.triton('triton_poi_fused__softmax_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 512), (512, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 32768, grid=grid(32768), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 1), (1, 512), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attention_weights, context_vector, context_vector_1], Original ATen: [aten._softmax, aten.mul, aten.sum]
triton_poi_fused__softmax_mul_sum_3.run(buf5, primals_3, buf6, 64, grid=grid(64), stream=stream0)
del buf5
return (buf6, primals_3, buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class AttentionHead(nn.Module):
def __init__(self, h_size, hidden_dim=512):
super().__init__()
self.W = nn.Linear(h_size, hidden_dim)
self.V = nn.Linear(hidden_dim, 1)
def forward(self, features):
att = torch.tanh(self.W(features))
score = self.V(att)
attention_weights = torch.softmax(score, dim=1)
context_vector = attention_weights * features
context_vector = torch.sum(context_vector, dim=1)
return context_vector
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'h_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
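# In-place bias-add + tanh: in_out_ptr0 holds the (64, 512) matmul output and
# in_ptr0 is the 512-wide bias broadcast across rows (att = tanh(W(features))).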
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
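# Softmax step 1 over dim=1: subtract the per-column max of the four scores
# (for numerical stability) and exponentiate.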
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
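# Softmax step 2 over dim=1: divide each exponentiated score by the sum of
# the four exponentials in its group.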
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
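# Fused weighted sum: multiplies the softmax weights (in_ptr0) with the input
# features (in_ptr1) and reduces over dim=1 to form the context vector.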
@triton.jit
def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
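# Entry point: W(features) via an extern mm, fused bias+tanh, V(att) via
# addmm, then the two softmax kernels and the final weighted reduction.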
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 512), (512, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(32768)](buf1, primals_2, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 512),
(512, 1), 0), reinterpret_tensor(primals_4, (512, 1), (1, 512),
0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_mul_sum_3[grid(64)](buf5, primals_3, buf6,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
return buf6, primals_3, buf1, buf3, primals_4
class AttentionHeadNew(nn.Module):
def __init__(self, h_size, hidden_dim=512):
super().__init__()
self.W = nn.Linear(h_size, hidden_dim)
self.V = nn.Linear(hidden_dim, 1)
def forward(self, input_0):
primals_1 = self.W.weight
primals_2 = self.W.bias
primals_4 = self.V.weight
primals_5 = self.V.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Leo1998-Lu/CommonLit-Readability-Prize-Silver-Medal-Solution
|
AttentionHead
| false | 9,239 |
[
"MIT"
] | 0 |
1df3282a77b5f8f45c4eef9831061cb390a63fc5
|
https://github.com/Leo1998-Lu/CommonLit-Readability-Prize-Silver-Medal-Solution/tree/1df3282a77b5f8f45c4eef9831061cb390a63fc5
|
Biaffine
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/bc/cbcett6ey62xkijoadrmiqwnmvdqa242vrdqwxiw4pvecwqjoged.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
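# Implements torch.cat((x, ones_like(x[..., :1])), -1): the first four
# positions of the last dim copy the input, the fifth is written as 1.0.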
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
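# Entry point: appends the bias column of ones to both inputs, then computes
# the biaffine score einsum('bxi,oij,byj->boxy') as two bmm calls.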
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(BiaffineNew, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KoichiYasuoka/diaparser
|
Biaffine
| false | 9,240 |
[
"MIT"
] | 0 |
ca11e65ef890cee2fbb23f42ae9c711c89767158
|
https://github.com/KoichiYasuoka/diaparser/tree/ca11e65ef890cee2fbb23f42ae9c711c89767158
|
Fp32LayerNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
return (buf2, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
import torch.distributed
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(input.float(), self.normalized_shape, self.
weight.float() if self.weight is not None else None, self.bias.
float() if self.bias is not None else None, self.eps)
return output.type_as(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'normalized_shape': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
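# First layer-norm pass: computes the per-row mean and rsqrt(var + 1e-05)
# over the last dimension (size 4) of the input.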
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
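# Second layer-norm pass: normalizes each element with the precomputed mean
# and rsqrt, then applies the elementwise affine weight and bias.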
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
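# Entry point: runs the two-pass layer norm (statistics, then normalize +
# affine) and returns the normalized tensor plus the saved input for backward.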
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return buf2, primals_1
class Fp32LayerNormNew(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
DCMMC/chineseocr
|
Fp32LayerNorm
| false | 9,241 |
[
"MIT"
] | 0 |
0b8772615239ea7f212b1ab5bc75183e7e9f16b0
|
https://github.com/DCMMC/chineseocr/tree/0b8772615239ea7f212b1ab5bc75183e7e9f16b0
|
AttentionPool2d
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%permute, [0], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/3y/c3yxwtlf7q4oo5ov7royd257tecdxmuzqqag2za7buohahsb7mfs.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add]
# Source node to ATen node mapping:
# x_1 => cat
# x_2 => add
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %permute],), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %unsqueeze), kwargs = {})
triton_poi_fused_add_cat_1 = async_compile.triton('triton_poi_fused_add_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp15 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 17, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr1 + ((16*x3) + ((-1) + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x4), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uc/cucgrga44gtlwzw6ehoy4jrwzm5fghn3ljf7iugmdjhe6m7mjcas.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_6, %primals_7, %primals_8],), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((-8) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
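# Sketch: cat_1 packs the three per-projection bias vectors into the single
# 12-element in_proj_bias that F.multi_head_attention_forward expects (the
# torch.cat over the q/k/v biases in the source module); offsets 0, 4 and 8
# into this buffer select the individual 4-element slices further down.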
# kernel path: runs/run_shard_8/inductor_cache/gq/cgqswernq7km7s5e6iqvidwhg6nuo5b5zp6m5py3pnbrdzepe236.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_10, 1.0), kwargs = {})
# %permute_17 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_13, [0, 2, 1]), kwargs = {})
triton_poi_fused_mul_transpose_3 = async_compile.triton('triton_poi_fused_mul_transpose_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = y0
tmp2 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tmp13 = tl.full([1, 1], 12, tl.int64)
tmp14 = tmp1 < tmp13
tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-8) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask)
''', device_str='cuda')
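# Note: with embed_dim=4 and num_heads=4 the per-head dimension is 1, so the
# attention scaling 1/sqrt(head_dim) degenerates to the literal 1.0 that is
# multiplied in above. The kernel adds the first 4-element slice of the
# concatenated bias and writes the result in two layouts, one feeding the
# bmm and one (transposed) saved for reuse.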
# kernel path: runs/run_shard_8/inductor_cache/qz/cqzbvlh3tf752q5rj44eh5o54zbmctvajzgosqadhjdb5lbyqjy2.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_7, 1.0), kwargs = {})
# %permute_18 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_14, [0, 2, 1]), kwargs = {})
triton_poi_fused_mul_transpose_4 = async_compile.triton('triton_poi_fused_mul_transpose_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = 4 + y0
tmp2 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tmp13 = tl.full([1, 1], 12, tl.int64)
tmp14 = tmp1 < tmp13
tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask)
''', device_str='cuda')
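# Same fusion for the second projection: the 4 + y0 offset shifts the bias
# lookup so that elements 4-7 of the concatenated bias (the second input to
# cat_1) are added instead of elements 0-3.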
# kernel path: runs/run_shard_8/inductor_cache/q4/cq45jdoeuvgpxv4zofa7tafzriebamhh2wkhnkrmvo3v75s7gbkj.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, any_1, div, eq, exp, full_default, logical_not, logical_not_1, sub, sum_1, where
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_15, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_15, -inf), kwargs = {})
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq,), kwargs = {})
# %any_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not, -1, True), kwargs = {})
# %logical_not_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_1,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 17, 17], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_1, %full_default, %div), kwargs = {})
triton_per_fused__safe_softmax_5 = async_compile.triton('triton_per_fused__safe_softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__safe_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 272
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
x2 = xindex % 68
x3 = (xindex // 68)
tmp0 = tl.load(in_ptr0 + (r1 + (17*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float("-inf")
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = (tmp14 != 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + (17*x2) + (1184*x3)), tmp23, rmask & xmask)
''', device_str='cuda')
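# Reference sketch of aten._safe_softmax (hypothetical helper, not called by
# the compiled graph): a row whose entries are all -inf would produce NaNs
# under a plain softmax, so such rows are zeroed out instead.
def _reference_safe_softmax(scores):
    import torch
    probs = torch.softmax(scores, dim=-1)
    all_masked = (scores == float('-inf')).all(dim=-1, keepdim=True)
    return torch.where(all_masked, torch.zeros_like(probs), probs)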
# kernel path: runs/run_shard_8/inductor_cache/hp/chpbwsfm77p7craq5ckxpabfykbcahcp5xaoqi5y3wp6bfgatgpn.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# multi_head_attention_forward => bmm_1
# Graph fragment:
# %bmm_1 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_16, %view_17), kwargs = {})
triton_poi_fused_bmm_6 = async_compile.triton('triton_poi_fused_bmm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 289
x1 = (xindex // 289)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (289*(x1 % 4)) + (1184*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
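# bmm_6 is a pure layout copy: it reads the (4, 4, 17, 17) attention weights
# out of their padded 1184-stride buffer and packs them into a dense
# (16, 17, 17) batch so the following extern bmm sees contiguous batches.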
# kernel path: runs/run_shard_8/inductor_cache/qb/cqb3dcbu2gzzir2rbde24vaunu7kucgqq3ftpydndkry6kdjyyqy.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 17
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (17*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
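# clone_7 transposes the per-head attention output back into a contiguous
# (seq, batch, embed) buffer so the output projection can run as a single
# addmm over all 68 = 17 x 4 token rows.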
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (17, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_1.run(buf0, primals_1, primals_2, buf1, 272, grid=grid(272), stream=stream0)
del buf0
del primals_1
del primals_2
buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((12, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_6, primals_7, primals_8, buf4, 12, grid=grid(12), stream=stream0)
buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(buf4, (4, ), (1, ), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del buf4
buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32)
buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
triton_poi_fused_mul_transpose_3.run(buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, grid=grid(16, 17), stream=stream0)
buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0); del buf2 # reuse
buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
triton_poi_fused_mul_transpose_4.run(buf3, primals_6, primals_7, primals_8, buf7, buf18, 16, 17, grid=grid(16, 17), stream=stream0)
del buf3
del primals_6
del primals_7
del primals_8
buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8)
buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
triton_per_fused__safe_softmax_5.run(buf8, buf12, 272, 17, grid=grid(272), stream=stream0)
buf13 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf12, buf13, 4624, grid=grid(4624), stream=stream0)
buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14)
del buf13
buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf14, buf15, 17, 16, grid=grid(17, 16), stream=stream0)
buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_10
return (reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3, )
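# Only token 0 of the 17-token sequence -- the attention-pooled mean token --
# is exposed as the (4, 4) output; the remaining entries of the tuple are
# intermediate buffers returned alongside it by the compiled graph.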
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((17, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :]
x, _ = F.multi_head_attention_forward(query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1], num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.
weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias,
self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=
False, dropout_p=0, out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True,
training=self.training, need_weights=False)
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads': 4}]
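# Usage sketch (hypothetical values): pool a (N, C, H, W) feature map into
# one embedding per image with the eager module above.
#
#   pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads=4)
#   out = pool(torch.rand(4, 4, 4, 4))  # -> (4, 4): one vector per image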
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 17, tl.int64)
tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x2)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
    tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask,
        eviction_policy='evict_last')
tmp1 = y0
tl.full([1, 1], 0, tl.int64)
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp5 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]),
tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1, 1], 12, tl.int64)
tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-8 + y0, [XBLOCK, YBLOCK]),
tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
    tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask,
        eviction_policy='evict_last')
tmp1 = 4 + y0
tl.full([1, 1], 0, tl.int64)
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]),
tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1, 1], 12, tl.int64)
tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]),
tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask)
@triton.jit
def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 272
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
x2 = xindex % 68
x3 = xindex // 68
tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 17 * x2 + 1184 * x3), tmp23, rmask & xmask)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 289
x1 = xindex // 289
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 289 * (x1 % 4) + 1184 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 17
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 17 * x1), xmask & ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (17, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2,
buf1, 272, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((12,), (1,), torch.float32)
triton_poi_fused_cat_2[grid(12)](primals_6, primals_7, primals_8,
buf4, 12, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(buf4, (4,), (1,), 8),
reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta
=1, out=buf5)
del buf4
buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32)
buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32)
triton_poi_fused_mul_transpose_3[grid(16, 17)](buf2, primals_6,
primals_7, primals_8, buf6, buf17, 16, 17, XBLOCK=32, YBLOCK=8,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0)
del buf2
buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32)
triton_poi_fused_mul_transpose_4[grid(16, 17)](buf3, primals_6,
primals_7, primals_8, buf7, buf18, 16, 17, XBLOCK=32, YBLOCK=16,
num_warps=4, num_stages=1)
del buf3
del primals_6
del primals_7
del primals_8
buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0),
0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8)
buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1),
torch.float32)
triton_per_fused__safe_softmax_5[grid(272)](buf8, buf12, 272, 17,
XBLOCK=1, num_warps=2, num_stages=1)
buf13 = buf8
del buf8
triton_poi_fused_bmm_6[grid(4624)](buf12, buf13, 4624, XBLOCK=256,
num_warps=4, num_stages=1)
buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0)
del buf7
extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1,
16, 0), 0), out=buf14)
del buf13
buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_7[grid(17, 16)](buf14, buf15, 17, 16, XBLOCK
=16, YBLOCK=32, num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0)
del buf14
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
return reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor(
buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4),
(4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1,
16), 0), buf17, buf18, primals_5, primals_4, primals_3
class AttentionPool2dNew(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, input_0):
primals_2 = self.positional_embedding
primals_3 = self.k_proj.weight
primals_6 = self.k_proj.bias
primals_4 = self.q_proj.weight
primals_7 = self.q_proj.bias
primals_5 = self.v_proj.weight
primals_8 = self.v_proj.bias
primals_9 = self.c_proj.weight
primals_10 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
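# AttentionPool2dNew is the compiled drop-in replacement: it reuses the eager
# module's parameters unchanged, routes them through call(), and keeps only
# output[0], the pooled embeddings.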
|
Holmes-Alan/TxST
|
AttentionPool2d
| false | 9,242 |
[
"MIT"
] | 0 |
c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
https://github.com/Holmes-Alan/TxST/tree/c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
CmapPafHeadAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ca/ccalm3hakjc3y6pyouzotjhedyolmiuxj66frcn7j2bllcgh7gjc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (1048576*y1)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pi/cpiqq6ydqends5jgnpbakdwghpnovztoymnjrsnfd3ycpt6b7cp7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
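# Both layout kernels above repack NCHW tensors into a channels-last physical
# layout (unit stride on the channel axis) before the convolutions run; the
# rough eager-mode equivalent would be:
#
#   x = x.contiguous(memory_format=torch.channels_last)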
# kernel path: runs/run_shard_8/inductor_cache/4q/c4qmtshyh2quqp4mfbem56kakgs5tn4mkrnwi3suy4oa6joff42a.py
# Topologically Sorted Source Nodes: [conv2d, ac, conv2d_1, ap, mul, mul_1], Original ATen: [aten.convolution, aten.sigmoid, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# ac => sigmoid
# ap => tanh
# conv2d => convolution
# conv2d_1 => convolution_1
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %tanh), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_tanh_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x2), None)
tmp4 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x2), None)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = libdevice.tanh(tmp2)
tmp10 = tmp6 * tmp9
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(in_out_ptr1 + (x2), tmp5, None)
tl.store(out_ptr0 + (x2), tmp8, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
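# Eager-mode sketch of the fused kernel above (hypothetical helper, not part
# of the compiled graph): both 3x3 attention convolutions get their bias
# added, then the shared input feature map is gated once by a sigmoid map
# and once by a tanh map, matching ac/ap in the source module.
def _reference_attention_gate(x, cmap_att, paf_att):
    import torch
    ac = torch.sigmoid(cmap_att(x))  # mul:   x * ac feeds the cmap 1x1 conv
    ap = torch.tanh(paf_att(x))      # mul_1: x * ap feeds the paf 1x1 conv
    return x * ac, x * ap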
# kernel path: runs/run_shard_8/inductor_cache/pn/cpngdclw7lzjwmfou3d3kip6qoxuvwgrgxlt5gs2kd5udjsfz5ny.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16384*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask)
''', device_str='cuda')
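# Epilogue for the 1x1 convolutions: add the conv bias and transpose the
# channels-last convolution result back into a contiguous NCHW output buffer.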
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 1024, 4096, grid=grid(1024, 4096), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf6 = buf5; del buf5 # reuse
buf4 = buf3; del buf3 # reuse
buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, ac, conv2d_1, ap, mul, mul_1], Original ATen: [aten.convolution, aten.sigmoid, aten.tanh, aten.mul]
triton_poi_fused_convolution_mul_sigmoid_tanh_2.run(buf6, buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304, grid=grid(4194304), stream=stream0)
del primals_3
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4))
buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf8, primals_7, buf9, 16, 4096, grid=grid(16, 4096), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4))
buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf11, primals_9, buf12, 16, 4096, grid=grid(16, 4096), stream=stream0)
del buf11
del primals_9
return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4, buf6, buf7, buf10, )
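# call() returns the two head outputs first -- buf9 (cmap) and buf12 (paf) --
# followed by buffers this '0_forward' graph saves for the backward pass.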
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
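# Hedged verification sketch (editorial addition, not part of the dump). It
# assumes the eager CmapPafHeadAttention class from this entry's source is in
# scope alongside `call`, and that the primals wiring matches the compiled
# wrapper below (input first, then cmap_att/paf_att/cmap_conv/paf_conv
# weight-bias pairs). Tolerances are assumptions.
def check_against_eager(atol=1e-4, rtol=1e-4):
    import torch
    m = CmapPafHeadAttention(4, 4, 4).cuda()
    x = torch.rand(4, 256, 64, 64, device='cuda')
    with torch.no_grad():
        ref_cmap, ref_paf = m(x)
        outs = call([x, m.cmap_att.weight, m.cmap_att.bias,
                     m.paf_att.weight, m.paf_att.bias,
                     m.cmap_conv.weight, m.cmap_conv.bias,
                     m.paf_conv.weight, m.paf_conv.bias])
    torch.testing.assert_close(outs[0], ref_cmap, atol=atol, rtol=rtol)
    torch.testing.assert_close(outs[1], ref_paf, atol=atol, rtol=rtol)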
|
import torch
import torch.utils.data
import torch.nn
import torch.optim
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
            layers += [
                torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1),
                torch.nn.BatchNorm2d(output_channels),
                torch.nn.ReLU(),
            ]
            for i in range(num_flat):
                layers += [
                    torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1),
                    torch.nn.BatchNorm2d(output_channels),
                    torch.nn.ReLU(),
                ]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHeadAttention(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHeadAttention, self).__init__()
self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.paf_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.cmap_att = torch.nn.Conv2d(upsample_channels,
upsample_channels, kernel_size=3, stride=1, padding=1)
self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels,
kernel_size=3, stride=1, padding=1)
self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x):
xc = self.cmap_up(x)
ac = torch.sigmoid(self.cmap_att(xc))
xp = self.paf_up(x)
ap = torch.tanh(self.paf_att(xp))
return self.cmap_conv(xc * ac), self.paf_conv(xp * ap)
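# Minimal gate-behavior sketch (illustrative only; `x` and `g` below are
# stand-ins for the up-sampled features and the attention-conv outputs):
#
#   x = torch.randn(1, 256, 8, 8); g = torch.randn_like(x)
#   cmap_in = x * torch.sigmoid(g)  # gate in (0, 1): pure attenuation
#   paf_in  = x * torch.tanh(g)     # gate in (-1, 1): can attenuate or flip sign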
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x2, None)
tmp4 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x2, None)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = libdevice.tanh(tmp2)
tmp10 = tmp6 * tmp9
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(in_out_ptr1 + x2, tmp5, None)
tl.store(out_ptr0 + x2, tmp8, None)
tl.store(out_ptr1 + x2, tmp10, None)
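# Hedged eager equivalent of the fused kernel above (a sketch, not generated
# code): with `conv_c`/`conv_p` the bias-free attention-conv outputs and `x`
# the shared input, it adds both biases in place, applies the two
# nonlinearities, and writes both gated products in one pass:
#
#   ac = torch.sigmoid(conv_c + cmap_bias.view(1, -1, 1, 1))
#   ap = torch.tanh(conv_p + paf_bias.view(1, -1, 1, 1))
#   cmap_in, paf_in = x * ac, x * ap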
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
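# Hedged note on the kernel above: it fuses the 1x1-conv bias add with a
# layout change, reading the NHWC-strided conv result (strides
# (16384, 1, 256, 4)) and writing a contiguous NCHW tensor (strides
# (16384, 4096, 64, 1)); roughly `out = (y + b.view(1, -1, 1, 1)).contiguous()`
# on a channels-last input.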
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_4, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf6 = buf5
del buf5
buf4 = buf3
del buf3
        buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
        buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
        triton_poi_fused_convolution_mul_sigmoid_tanh_2[grid(4194304)](
            buf6, buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304,
            XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
del primals_5
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4))
buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(16, 4096)](buf8, primals_7,
buf9, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4))
        buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64, 1), 0)
del buf8
triton_poi_fused_convolution_3[grid(16, 4096)](buf11, primals_9,
buf12, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf11
del primals_9
return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4, buf6,
buf7, buf10)
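# Hedged note on memory reuse above (editorial): `buf6 = buf5` / `buf4 = buf3`
# are deliberate aliases so the fused kernel can add the conv biases in place,
# and `reinterpret_tensor` recycles buf8's storage for buf12 under NCHW
# strides. This changes allocation traffic only, not numerics.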
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
            layers += [
                torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1),
                torch.nn.BatchNorm2d(output_channels),
                torch.nn.ReLU(),
            ]
            for i in range(num_flat):
                layers += [
                    torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1),
                    torch.nn.BatchNorm2d(output_channels),
                    torch.nn.ReLU(),
                ]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHeadAttentionNew(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHeadAttentionNew, self).__init__()
self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.paf_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.cmap_att = torch.nn.Conv2d(upsample_channels,
upsample_channels, kernel_size=3, stride=1, padding=1)
self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels,
kernel_size=3, stride=1, padding=1)
self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, input_0):
primals_2 = self.cmap_att.weight
primals_3 = self.cmap_att.bias
primals_4 = self.paf_att.weight
primals_5 = self.paf_att.bias
primals_6 = self.cmap_conv.weight
primals_7 = self.cmap_conv.bias
primals_8 = self.paf_conv.weight
primals_9 = self.paf_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
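# Minimal usage sketch (editorial; shapes taken from get_inputs /
# get_init_inputs above -- with num_upsample=0 the Up blocks are empty, so the
# 256-channel input feeds the attention convs directly):
#
#   m = CmapPafHeadAttentionNew(4, 4, 4).cuda()
#   cmap, paf = m(torch.rand(4, 256, 64, 64, device='cuda'))
#   assert cmap.shape == (4, 4, 64, 64) and paf.shape == (4, 4, 64, 64)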
|
KeithStoke/POSE_Test
|
CmapPafHeadAttention
| false | 9,243 |
[
"MIT"
] | 0 |
581aaf6f3d4fd50e56aa16c43913292af7d36879
|
https://github.com/KeithStoke/POSE_Test/tree/581aaf6f3d4fd50e56aa16c43913292af7d36879
|
UNet
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/oi/coi4geilgwlqjngeai6hg4iruvpzcrvvpmho77vuyz3ppbade2sa.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16516096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64516) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
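# Hedged eager equivalent of the epilogue above (sketch, not generated code):
# the convolution itself runs through extern_kernels.convolution, and this
# kernel only fuses the bias add and ReLU in place:
#
#   y = conv_out            # output of the extern convolution, bias=None
#   y += bias.view(1, -1, 1, 1)
#   y.clamp_(min=0)         # same result as torch.relu_(y)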
# kernel path: runs/run_shard_8/inductor_cache/7o/c7ohemjzrjpmo5tghcbldpg3s3xexqjehsqi5j3see7mnjgihu4y.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16257024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 63504) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xi/cxil7wfmjnrkoslosnjnwmoqebyussbsklzthwtzwxz2o74l47gs.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4064256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 126
x3 = (xindex // 126)
x2 = (xindex // 15876)
x4 = xindex % 15876
tmp0 = tl.load(in_ptr0 + ((2*x0) + (504*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (504*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (252 + (2*x0) + (504*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (253 + (2*x0) + (504*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (15904*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (16000*x2)), tmp16, xmask)
''', device_str='cuda')
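# Hedged note on the kernel above: it is a 2x2, stride-2 max pool that, unlike
# F.max_pool2d(..., return_indices=True) with its flat input offsets, stores a
# compact int8 code in {0, 1, 2, 3} naming which corner of each window won --
# sufficient for inductor's own backward/unpooling and cheaper to keep around.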
# kernel path: runs/run_shard_8/inductor_cache/mo/cmozurfjru5huadwzrh5yjoitgoxelkjngdyjsgtehbyvs24isqk.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_4 => convolution_2
# x_5 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7872512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 15376) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7z/c7zdlcnndhik4fhrd3ma2afaitw5ju2jqj5dhl26ipgwz2hgiher.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_6 => convolution_3
# x_7 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7620608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 14884) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bb/cbbeqfykl2wgg3c65cet5cy67zimq7orfpepzs3vr2rlhu74t6er.py
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1905152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 61
x3 = (xindex // 61)
x2 = (xindex // 3721)
x4 = xindex % 3721
tmp0 = tl.load(in_ptr0 + ((2*x0) + (244*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (244*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (122 + (2*x0) + (244*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (123 + (2*x0) + (244*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (3744*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (3840*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/al/cale6iof6zxwoy6xmrfdcnforqmwzzvzi3cb4g6s4jjxqw3hb7np.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_8 => convolution_4
# x_9 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3564544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3481) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/o2/co2av5pccpjevh6yky7hhdrsglymtzut2q3artudzrezlirfrzcg.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_10 => convolution_5
# x_11 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3326976
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3249) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kt/cktc65bbrriywtquenbdrltvs6witq3gtcn3rvhoj2hholh5ihhi.py
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_2 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 802816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 28
x1 = (xindex // 28) % 28
x2 = (xindex // 784)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (57 + (2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (58 + (2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hi/chiqbenmv4ahnhr5zzytacsmj2bam534skwucufjcoevmj65liid.py
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_12 => convolution_6
# x_13 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1384448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 676) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/4n/c4ng7b3xlqdmz4wf6padqdaqd63cmlob5sujzf4en6rjcglhiane.py
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_14 => convolution_7
# x_15 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1179648
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 576) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/35/c356cd7zgpicezkybpa4roi4bdkpka2o2mqfytmksv4niqhdjpg5.py
# Topologically Sorted Source Nodes: [max_pool2d_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_3 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/h4/ch4ssn455dn4jzxavkj54g5nkxgb7fualnfv6kxozp5tbwrysclf.py
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_16 => convolution_8
# x_17 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 409600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 100) % 1024
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ym/cymeve4n2tfhbdp57kl6vbua7sg3rraw7fbbupi7awglsnzhjnth.py
# Topologically Sorted Source Nodes: [x_18, x_19], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_18 => convolution_9
# x_19 => relu_9
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {})
triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 1024
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/g2/cg2m4cjaoijmslxcqpdriumxex3jthbggayhwvzgr2fp4jr2xoct.py
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# up_1 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_14 = async_compile.triton('triton_poi_fused__to_copy_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kj/ckjtbjd57qolhrmtahgjvgra32ws3y6k2py4qtzmi63emr64mpos.py
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# up_1 => add, clamp_max
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 7), kwargs = {})
triton_poi_fused_add_clamp_15 = async_compile.triton('triton_poi_fused_add_clamp_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 7, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bw/cbwtnrcuwwmine5iorv4m5uhjwildqcnmybfugynwzc3tslmxdgi.py
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# up_1 => clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0.4666666666666667), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_16 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_16(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
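# The kernel above computes the horizontal interpolation weight: the
# fractional part of the clamped source coordinate, clamped to [0, 1]
# (tmp5 - tmp7 in the kernel body). Reference sketch (illustrative name):
def _ref_bilinear_weight(out_len=16, scale=0.4666666666666667):
    import torch
    coord = (torch.arange(out_len, dtype=torch.float32) * scale).clamp(min=0.0)
    return (coord - coord.to(torch.int64).to(torch.float32)).clamp(0.0, 1.0)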
# kernel path: runs/run_shard_8/inductor_cache/am/camdbf62qsjodxp4vui7qdhsz63wpevku7c42zr5voumc5pfamz7.py
# Topologically Sorted Source Nodes: [up, up_1], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# up => convolution_10
# up_1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, mul_2, mul_3, mul_4, sub_1, sub_2, sub_4
# Graph fragment:
# %convolution_10 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_10, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_10, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_10, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_10, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_17 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x5 = (xindex // 256)
x2 = (xindex // 256) % 512
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (8*tmp4) + (64*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (8*tmp4) + (64*x5)), None, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (8*tmp25) + (64*x5)), None, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (8*tmp25) + (64*x5)), None, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tl.store(out_ptr0 + (x6), tmp21, None)
tl.store(out_ptr1 + (x6), tmp35, None)
''', device_str='cuda')
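# The fused kernel above gathers the four bilinear neighbors of the
# bias-added 8x8 conv output and emits two partial results: out_ptr0 holds
# the top-row lerp (v00 + (v01 - v00) * wx) and out_ptr1 holds the vertical
# correction ((bottom-row lerp - top-row lerp) * wy); the final add of the
# two is deferred to the cat kernel below. Since the coordinate scale is
# (in - 1) / (out - 1), the index math is consistent with
# align_corners=True. Eager-mode sketch of the whole step (assuming x is
# the conv output of shape [N, 512, 8, 8]; name is illustrative):
def _ref_bilinear_upsample(x):
    import torch.nn.functional as F
    return F.interpolate(x, size=(16, 16), mode='bilinear', align_corners=True)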
# kernel path: runs/run_shard_8/inductor_cache/j6/cj6fumma62jtj7xlgosx37oeojvmvcdogde4lgj4lvmprc3nh554.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# y => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%constant_pad_nd, %constant_pad_nd_1], 1), kwargs = {})
triton_poi_fused_cat_18 = async_compile.triton('triton_poi_fused_cat_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_18(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3211264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 784) % 1024
x1 = (xindex // 28) % 28
x0 = xindex % 28
x3 = (xindex // 802816)
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 24, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = (-2) + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + ((-50) + x0 + (24*x1) + (576*x2) + (294912*x3)), tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tmp20 = tl.full([1], 1024, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = (-6) + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 16, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = (-6) + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + ((-102) + x0 + (16*x1) + (256*((-512) + x2)) + (131072*x3)), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + ((-102) + x0 + (16*x1) + (256*((-512) + x2)) + (131072*x3)), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + (x6), tmp40, None)
''', device_str='cuda')
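# The cat kernel above fuses several steps into one pass: it zero-pads the
# skip branch (in_ptr0, 24x24 -> 28x28 with pad 2 per side), completes the
# bilinear interpolation by summing the two partials from the previous
# kernel (tmp33 + tmp34), zero-pads that result (16x16 -> 28x28 with pad 6
# per side), and concatenates both branches along channels. Eager sketch
# (illustrative tensor names, shapes as in the kernel):
def _ref_pad_cat(skip, up_top, up_delta):
    import torch
    import torch.nn.functional as F
    a = F.pad(skip, (2, 2, 2, 2))               # [N, 512, 24, 24] -> [N, 512, 28, 28]
    b = F.pad(up_top + up_delta, (6, 6, 6, 6))  # [N, 512, 16, 16] -> [N, 512, 28, 28]
    return torch.cat([a, b], dim=1)             # -> [N, 1024, 28, 28]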
# kernel path: runs/run_shard_8/inductor_cache/ah/cahkyn4sw4pzflccaqi67oh5664xbvsh66vbfhzz4nap6xhmcfos.py
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# up_4 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_19 = async_compile.triton('triton_poi_fused__to_copy_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_19(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.48936170212765956
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
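# Same low-index computation as triton_poi_fused__to_copy_14, now for the
# 24 -> 48 stage: 0.48936170212765956 = (24 - 1) / (48 - 1) = 23/47, again
# the (in - 1) / (out - 1) coordinate scale.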
# kernel path: runs/run_shard_8/inductor_cache/xi/cxic34thfzspjsgyjqxbmh6gaz7ugvp6vs6yh4uu7btyupxzd5gq.py
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# up_4 => add_5, clamp_max_4
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_5, 23), kwargs = {})
triton_poi_fused_add_clamp_20 = async_compile.triton('triton_poi_fused_add_clamp_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.48936170212765956
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 23, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6n/c6nncmrqx3j26y4ejymrtfgrv7csnxfnx56qobcwcgwyo5naliaq.py
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# up_4 => clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_5, sub_5
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (48,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_4, 0.48936170212765956), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_5, 0.0), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_21 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_21(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.48936170212765956
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/eq/ceqkdehpwy5snfrf4qpyyo7ez6zfnl764nr5dgnig7rrv5yycnab.py
# Topologically Sorted Source Nodes: [up_3, up_4], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# up_3 => convolution_13
# up_4 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_7, add_8, mul_7, mul_8, mul_9, sub_6, sub_7, sub_9
# Graph fragment:
# %convolution_13 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_6), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_6), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %add_7), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_7), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_22 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2359296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 48) % 48
x0 = xindex % 48
x5 = (xindex // 2304)
x2 = (xindex // 2304) % 256
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 24, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (24*tmp4) + (576*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (24*tmp4) + (576*x5)), None, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (24*tmp25) + (576*x5)), None, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (24*tmp25) + (576*x5)), None, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tl.store(out_ptr0 + (x6), tmp21, None)
tl.store(out_ptr1 + (x6), tmp35, None)
''', device_str='cuda')
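# This fused gather/lerp kernel mirrors
# triton_poi_fused__unsafe_index_add_convolution_mul_sub_17 one decoder
# stage up: 256 channels at 24x24 -> 48x48, so the source row stride is 24
# and the per-plane stride 576 (= 24 * 24) instead of 8 and 64.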
# kernel path: runs/run_shard_8/inductor_cache/5c/c5cn4aq74h5b35fn34e465tlgyvgb7m5rhrqhufqy67vjucpt2lq.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# y_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%constant_pad_nd_2, %constant_pad_nd_3], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7620608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 3721) % 512
x1 = (xindex // 61) % 61
x0 = xindex % 61
x3 = (xindex // 1905152)
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 57, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = (-2) + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + ((-116) + x0 + (57*x1) + (3249*x2) + (831744*x3)), tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tmp20 = tl.full([1], 512, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = (-6) + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 48, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = (-6) + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + ((-294) + x0 + (48*x1) + (2304*((-256) + x2)) + (589824*x3)), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + ((-294) + x0 + (48*x1) + (2304*((-256) + x2)) + (589824*x3)), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + (x6), tmp40, None)
''', device_str='cuda')
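# As in triton_poi_fused_cat_18, the negative load offsets encode the
# flattened top-left padding shift: -116 = -(2 * 57 + 2) for the pad-2
# skip branch (57x57 source) and -294 = -(6 * 48 + 6) for the pad-6
# upsampled branch (48x48 source).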
# kernel path: runs/run_shard_8/inductor_cache/ud/cudr7becxwrhayxvgy4twmtw36dur2yzlr2n6im7t3vh6obigpeo.py
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# up_7 => convert_element_type_9
# Graph fragment:
# %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_24 = async_compile.triton('triton_poi_fused__to_copy_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 114
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49557522123893805
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ho/chocq4mxa2jyfkljxpgz7j66aolecixsnpic7djgbbj23te3emvz.py
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# up_7 => add_10, clamp_max_8
# Graph fragment:
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
# %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_10, 56), kwargs = {})
triton_poi_fused_add_clamp_25 = async_compile.triton('triton_poi_fused_add_clamp_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 114
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49557522123893805
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 56, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/3g/c3gvi6pzhytun7e2pobs6u22haytd6jdugrt6e3g3zsm2il6ypwa.py
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# up_7 => clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_10, sub_10
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (114,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_8, 0.49557522123893805), kwargs = {})
# %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_10, 0.0), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_26 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 114
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49557522123893805
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bb/cbbr4a2f45w2kckcum3bwlu3oyqntc7lt52o6que35wlr57fr53f.py
# Topologically Sorted Source Nodes: [up_6, up_7], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# up_6 => convolution_16
# up_7 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_12, add_13, mul_12, mul_13, mul_14, sub_11, sub_12, sub_14
# Graph fragment:
# %convolution_16 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_13, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_16, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_16, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_16, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_16, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_10), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_12), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_10), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_13), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %clamp_max_11), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_27 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_27(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 6653952
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 114) % 114
x0 = xindex % 114
x5 = (xindex // 12996)
x2 = (xindex // 12996) % 128
x4 = xindex % 12996
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 57, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (57*tmp4) + (3249*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (57*tmp4) + (3249*x5)), None, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (57*tmp25) + (3249*x5)), None, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (57*tmp25) + (3249*x5)), None, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tl.store(out_ptr0 + (x4 + (13024*x5)), tmp21, None)
tl.store(out_ptr1 + (x4 + (13024*x5)), tmp35, None)
''', device_str='cuda')
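# Note the store addressing: x4 covers the 114 * 114 = 12996 spatial
# elements of each plane, but the stores use a per-plane stride of 13024.
# Inductor appears to have padded the plane stride (12996 -> 13024) so
# that each plane starts at an aligned offset; triton_poi_fused_cat_28
# below reads the partials back with the same 13024 stride.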
# kernel path: runs/run_shard_8/inductor_cache/cf/ccf6zb432yqegq3mo33rlgwbrjbposwbqnzspijr3uqwwnrohtuk.py
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# y_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%constant_pad_nd_4, %constant_pad_nd_5], 1), kwargs = {})
triton_poi_fused_cat_28 = async_compile.triton('triton_poi_fused_cat_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_28(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16257024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 15876) % 256
x1 = (xindex // 126) % 126
x0 = xindex % 126
x3 = (xindex // 4064256)
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 122, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = (-2) + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + ((-246) + x0 + (122*x1) + (14884*x2) + (1905152*x3)), tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tmp20 = tl.full([1], 256, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = (-6) + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 114, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = (-6) + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + ((-690) + x0 + (114*x1) + (13024*((-128) + x2)) + (1667072*x3)), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + ((-690) + x0 + (114*x1) + (13024*((-128) + x2)) + (1667072*x3)), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + (x6), tmp40, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6b/c6bmihke4kvjmmonimin5iqf2l5owtisnfmxujwn66euncqmopwp.py
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# up_10 => convert_element_type_13
# Graph fragment:
# %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_29 = async_compile.triton('triton_poi_fused__to_copy_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_29(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 244
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49794238683127573
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
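# Final upsampling stage of this pattern: 122 -> 244, with coordinate
# scale 0.49794238683127573 = (122 - 1) / (244 - 1) = 121/243.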
# kernel path: runs/run_shard_8/inductor_cache/f3/cf3mmi6ihum72e7m3cgzwt4plrw6fkozmpvnsinv42hinztv4g4e.py
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# up_10 => add_15, clamp_max_12
# Graph fragment:
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {})
# %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 121), kwargs = {})
triton_poi_fused_add_clamp_30 = async_compile.triton('triton_poi_fused_add_clamp_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_30(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 244
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49794238683127573
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 121, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6q/c6qs74ukxhcr335sstfbzfcipfclkqsrfjds7rsaqceos2ks56sc.py
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# up_10 => clamp_max_14, clamp_min_12, clamp_min_14, convert_element_type_12, iota_6, mul_15, sub_15
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (244,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_6, torch.float32), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_12, 0.49794238683127573), kwargs = {})
# %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_15, 0.0), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0.0), kwargs = {})
# %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_31 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_31(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 244
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49794238683127573
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
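    # tmp11 is the fractional part of the rescaled source coordinate, clamped to [0, 1].
    # It is the bilinear blend weight; this kernel is launched once for the column
    # weights (buf75) and once for the row weights (buf77) in the call() below.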
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/26/c26l3cuzpdj6e65ngbevraq3jvcmshya5rauf2acqpfu7stjo463.py
# Topologically Sorted Source Nodes: [up_9, up_10], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# up_10 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_17, add_18, mul_17, mul_18, mul_19, sub_16, sub_17, sub_19
# up_9 => convolution_19
# Graph fragment:
# %convolution_19 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_15, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_19, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_19, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_19, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_19, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max_14), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_17), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_14), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_18), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_18, %add_17), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %clamp_max_15), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_32 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_32(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 15241216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 244) % 244
x0 = xindex % 244
x5 = (xindex // 59536)
x2 = (xindex // 59536) % 64
x4 = xindex % 59536
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 122, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
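    # _unsafe_index semantics: negative gather indices wrap around by adding the axis
    # size (122) before addressing the 122x122 convolution output.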
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (122*tmp4) + (14884*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (122*tmp4) + (14884*x5)), None, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (122*tmp25) + (14884*x5)), None, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (122*tmp25) + (14884*x5)), None, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
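    # The vertical blend is intentionally left unfinished here: out_ptr0 receives the
    # horizontally interpolated top row and out_ptr1 the weighted row difference; the
    # two are summed later inside triton_poi_fused_cat_33 (tmp33 + tmp34 there).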
tl.store(out_ptr0 + (x4 + (59552*x5)), tmp21, None)
tl.store(out_ptr1 + (x4 + (59552*x5)), tmp35, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/65/c65rs5r3w3a6ubiwogqsdxqagoviy5beimxot6eqavdlbw6ctaq7.py
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# y_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%constant_pad_nd_6, %constant_pad_nd_7], 1), kwargs = {})
triton_poi_fused_cat_33 = async_compile.triton('triton_poi_fused_cat_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[33554432],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_33', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_33(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 33554432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 65536) % 128
x1 = (xindex // 256) % 256
x0 = xindex % 256
x3 = (xindex // 8388608)
x6 = xindex
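    # Channel-split concatenation with fused padding: channels 0-63 read the 252x252
    # encoder activation padded by 2 per side, channels 64-127 read the 244x244
    # upsampled decoder sum padded by 6 per side; both land on a common 256x256 grid.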
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 252, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = (-2) + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + ((-506) + x0 + (252*x1) + (63504*x2) + (4064256*x3)), tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tmp20 = tl.full([1], 128, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = (-6) + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 244, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = (-6) + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + ((-1470) + x0 + (244*x1) + (59552*((-64) + x2)) + (3811328*x3)), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + ((-1470) + x0 + (244*x1) + (59552*((-64) + x2)) + (3811328*x3)), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + (x6), tmp40, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/cx/ccxkslerd5uympqutx2sq3qawltualucrkshks5e4i3jvdocgsfb.py
# Topologically Sorted Source Nodes: [x_34, x_35, x_36], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_34 => convolution_21
# x_35 => relu_17
# x_36 => constant_pad_nd_8
# Graph fragment:
# %convolution_21 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_16, %primals_44, %primals_45, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_17 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_21,), kwargs = {})
# %constant_pad_nd_8 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu_17, [2, 2, 2, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_34 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16777216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x4 = (xindex // 65536)
x2 = (xindex // 65536) % 64
x6 = xindex
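    # Fuses the bias add, ReLU, and the trailing F.pad(x, (2, 2, 2, 2)): the (-2) + x
    # index guards implement the 2-pixel zero border around the 252x252 activation.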
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 252, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-506) + x0 + (252*x1) + (63504*x4)), tmp10, other=0.0)
tmp12 = tl.load(in_ptr1 + (x2), tmp10, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (x6), tmp17, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vi/cvi4dkwbyi6tdqqb6cbkh72tumog2lpckmntmcyjprawkjtaan3l.py
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_37 => convolution_22
# Graph fragment:
# %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_8, %primals_46, %primals_47, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_35 = async_compile.triton('triton_poi_fused_convolution_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_35', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_35(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/r2/cr2guae7qgv2b5k5c63deh3xw6trpnrebttcw3ysc43mp3xobt77.py
# Topologically Sorted Source Nodes: [x_34, x_35], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_34 => convolution_21
# x_35 => relu_17
# Graph fragment:
# %convolution_21 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_16, %primals_44, %primals_45, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_17 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_21,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_17, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_36 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_36(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16257024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 63504) % 64
x0 = xindex % 63504
x4 = (xindex // 63504)
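    # Recomputes conv + bias + ReLU only to record the mask of clamped activations
    # (output <= 0); autograd's threshold_backward uses it to zero the gradient.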
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + (63616*x4)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (512, ), (1, ))
assert_size_stride(primals_16, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (1024, ), (1, ))
assert_size_stride(primals_20, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_21, (1024, ), (1, ))
assert_size_stride(primals_22, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_29, (256, ), (1, ))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_33, (256, ), (1, ))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128, ), (1, ))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64, ), (1, ))
assert_size_stride(primals_42, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_43, (64, ), (1, ))
assert_size_stride(primals_44, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (64, ), (1, ))
assert_size_stride(primals_46, (4, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_47, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 254, 254), (4129024, 64516, 254, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16516096, grid=grid(16516096), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 252, 252), (4064256, 63504, 252, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 16257024, grid=grid(16257024), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 126, 126), (1017856, 15904, 126, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 126, 126), (1024000, 16000, 126, 1), torch.int8)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 4064256, grid=grid(4064256), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 124, 124), (1968128, 15376, 124, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf7, primals_7, 7872512, grid=grid(7872512), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 122, 122), (1905152, 14884, 122, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_9, 7620608, grid=grid(7620608), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 128, 61, 61), (479232, 3744, 61, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 61, 61), (491520, 3840, 61, 1), torch.int8)
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 1905152, grid=grid(1905152), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 59, 59), (891136, 3481, 59, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf13, primals_11, 3564544, grid=grid(3564544), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 57, 57), (831744, 3249, 57, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf15, primals_13, 3326976, grid=grid(3326976), stream=stream0)
del primals_13
buf16 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1), torch.float32)
buf17 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1), torch.int8)
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf15, buf16, buf17, 802816, grid=grid(802816), stream=stream0)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 512, 26, 26), (346112, 676, 26, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf19, primals_15, 1384448, grid=grid(1384448), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 512, 24, 24), (294912, 576, 24, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf21, primals_17, 1179648, grid=grid(1179648), stream=stream0)
del primals_17
buf22 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1), torch.float32)
buf23 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1), torch.int8)
# Topologically Sorted Source Nodes: [max_pool2d_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_11.run(buf21, buf22, buf23, 294912, grid=grid(294912), stream=stream0)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 1024, 10, 10), (102400, 100, 10, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf25, primals_19, 409600, grid=grid(409600), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 1024, 8, 8), (65536, 64, 8, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [x_18, x_19], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf27, primals_21, 262144, grid=grid(262144), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [up], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 8, 8), (32768, 64, 8, 1))
buf29 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_14.run(buf29, 16, grid=grid(16), stream=stream0)
buf30 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_15.run(buf30, 16, grid=grid(16), stream=stream0)
buf31 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_14.run(buf31, 16, grid=grid(16), stream=stream0)
buf32 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_15.run(buf32, 16, grid=grid(16), stream=stream0)
buf33 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_16.run(buf33, 16, grid=grid(16), stream=stream0)
buf35 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [up_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_16.run(buf35, 16, grid=grid(16), stream=stream0)
buf34 = empty_strided_cuda((4, 512, 16, 16), (131072, 256, 16, 1), torch.float32)
buf36 = empty_strided_cuda((4, 512, 16, 16), (131072, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [up, up_1], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_17.run(buf29, buf31, buf28, primals_23, buf32, buf33, buf30, buf35, buf34, buf36, 524288, grid=grid(524288), stream=stream0)
del buf28
del primals_23
buf37 = empty_strided_cuda((4, 1024, 28, 28), (802816, 784, 28, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_18.run(buf21, buf34, buf36, buf37, 3211264, grid=grid(3211264), stream=stream0)
del buf34
del buf36
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 26, 26), (346112, 676, 26, 1))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf39, primals_25, 1384448, grid=grid(1384448), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 24, 24), (294912, 576, 24, 1))
buf41 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf41, primals_27, 1179648, grid=grid(1179648), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [up_3], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 256, 24, 24), (147456, 576, 24, 1))
buf43 = empty_strided_cuda((48, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_19.run(buf43, 48, grid=grid(48), stream=stream0)
buf44 = empty_strided_cuda((48, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_20.run(buf44, 48, grid=grid(48), stream=stream0)
buf45 = empty_strided_cuda((48, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_19.run(buf45, 48, grid=grid(48), stream=stream0)
buf46 = empty_strided_cuda((48, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_20.run(buf46, 48, grid=grid(48), stream=stream0)
buf47 = empty_strided_cuda((48, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_21.run(buf47, 48, grid=grid(48), stream=stream0)
buf49 = empty_strided_cuda((48, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [up_4], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_21.run(buf49, 48, grid=grid(48), stream=stream0)
buf48 = empty_strided_cuda((4, 256, 48, 48), (589824, 2304, 48, 1), torch.float32)
buf50 = empty_strided_cuda((4, 256, 48, 48), (589824, 2304, 48, 1), torch.float32)
# Topologically Sorted Source Nodes: [up_3, up_4], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_22.run(buf43, buf45, buf42, primals_29, buf46, buf47, buf44, buf49, buf48, buf50, 2359296, grid=grid(2359296), stream=stream0)
del buf42
del primals_29
buf51 = empty_strided_cuda((4, 512, 61, 61), (1905152, 3721, 61, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.cat]
triton_poi_fused_cat_23.run(buf15, buf48, buf50, buf51, 7620608, grid=grid(7620608), stream=stream0)
del buf48
del buf50
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf51, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 256, 59, 59), (891136, 3481, 59, 1))
buf53 = buf52; del buf52 # reuse
# Topologically Sorted Source Nodes: [x_24, x_25], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf53, primals_31, 3564544, grid=grid(3564544), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 256, 57, 57), (831744, 3249, 57, 1))
buf55 = buf54; del buf54 # reuse
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf55, primals_33, 3326976, grid=grid(3326976), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [up_6], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 57, 57), (415872, 3249, 57, 1))
buf57 = empty_strided_cuda((114, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_24.run(buf57, 114, grid=grid(114), stream=stream0)
buf58 = empty_strided_cuda((114, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf58, 114, grid=grid(114), stream=stream0)
buf59 = empty_strided_cuda((114, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_24.run(buf59, 114, grid=grid(114), stream=stream0)
buf60 = empty_strided_cuda((114, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf60, 114, grid=grid(114), stream=stream0)
buf61 = empty_strided_cuda((114, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_26.run(buf61, 114, grid=grid(114), stream=stream0)
buf63 = empty_strided_cuda((114, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [up_7], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_26.run(buf63, 114, grid=grid(114), stream=stream0)
buf62 = empty_strided_cuda((4, 128, 114, 114), (1667072, 13024, 114, 1), torch.float32)
buf64 = empty_strided_cuda((4, 128, 114, 114), (1667072, 13024, 114, 1), torch.float32)
# Topologically Sorted Source Nodes: [up_6, up_7], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_27.run(buf57, buf59, buf56, primals_35, buf60, buf61, buf58, buf63, buf62, buf64, 6653952, grid=grid(6653952), stream=stream0)
del buf56
del primals_35
buf65 = empty_strided_cuda((4, 256, 126, 126), (4064256, 15876, 126, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.cat]
triton_poi_fused_cat_28.run(buf9, buf62, buf64, buf65, 16257024, grid=grid(16257024), stream=stream0)
del buf62
del buf64
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf65, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 124, 124), (1968128, 15376, 124, 1))
buf67 = buf66; del buf66 # reuse
# Topologically Sorted Source Nodes: [x_28, x_29], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf67, primals_37, 7872512, grid=grid(7872512), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution]
buf68 = extern_kernels.convolution(buf67, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 122, 122), (1905152, 14884, 122, 1))
buf69 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [x_30, x_31], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf69, primals_39, 7620608, grid=grid(7620608), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [up_9], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 64, 122, 122), (952576, 14884, 122, 1))
buf71 = empty_strided_cuda((244, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_29.run(buf71, 244, grid=grid(244), stream=stream0)
buf72 = empty_strided_cuda((244, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_30.run(buf72, 244, grid=grid(244), stream=stream0)
buf73 = empty_strided_cuda((244, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_29.run(buf73, 244, grid=grid(244), stream=stream0)
buf74 = empty_strided_cuda((244, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_30.run(buf74, 244, grid=grid(244), stream=stream0)
buf75 = empty_strided_cuda((244, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_31.run(buf75, 244, grid=grid(244), stream=stream0)
buf77 = empty_strided_cuda((244, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [up_10], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_31.run(buf77, 244, grid=grid(244), stream=stream0)
buf76 = empty_strided_cuda((4, 64, 244, 244), (3811328, 59552, 244, 1), torch.float32)
buf78 = empty_strided_cuda((4, 64, 244, 244), (3811328, 59552, 244, 1), torch.float32)
# Topologically Sorted Source Nodes: [up_9, up_10], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_32.run(buf71, buf73, buf70, primals_41, buf74, buf75, buf72, buf77, buf76, buf78, 15241216, grid=grid(15241216), stream=stream0)
del buf70
del primals_41
buf79 = empty_strided_cuda((4, 128, 256, 256), (8388608, 65536, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.cat]
triton_poi_fused_cat_33.run(buf3, buf76, buf78, buf79, 33554432, grid=grid(33554432), stream=stream0)
del buf76
del buf78
# Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution]
buf80 = extern_kernels.convolution(buf79, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 64, 254, 254), (4129024, 64516, 254, 1))
buf81 = buf80; del buf80 # reuse
# Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf81, primals_43, 16516096, grid=grid(16516096), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution]
buf82 = extern_kernels.convolution(buf81, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 64, 252, 252), (4064256, 63504, 252, 1))
buf83 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_34, x_35, x_36], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_34.run(buf82, primals_45, buf83, 16777216, grid=grid(16777216), stream=stream0)
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf83, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.convolution]
triton_poi_fused_convolution_35.run(buf85, primals_47, 1048576, grid=grid(1048576), stream=stream0)
del primals_47
buf86 = empty_strided_cuda((4, 64, 252, 252), (4071424, 63616, 252, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_34, x_35], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_36.run(buf82, primals_45, buf86, 16257024, grid=grid(16257024), stream=stream0)
del buf82
del primals_45
return (buf85, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf30, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf44, buf45, buf46, buf47, buf49, buf51, buf53, buf55, buf57, buf58, buf59, buf60, buf61, buf63, buf65, buf67, buf69, buf71, buf72, buf73, buf74, buf75, buf77, buf79, buf81, buf83, buf86, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 256, 256), (262144, 65536, 256, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((4, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
"""
    Two unpadded 3x3 convolutions, each followed by a ReLU.
"""
def __init__(self, in_channels, out_channels):
super(DoubleConv, self).__init__()
self.conv_1 = nn.Conv2d(in_channels, out_channels, 3)
self.conv_2 = nn.Conv2d(out_channels, out_channels, 3)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv_1(x)
x = self.relu(x)
x = self.conv_2(x)
x = self.relu(x)
return x
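# Note: with no padding, each 3x3 convolution trims one pixel per border, so a
# DoubleConv output is 4 pixels smaller in H and W than its input; UpsampleCat
# below compensates with explicit F.pad calls before concatenation.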
class UpsampleCat(nn.Module):
"""
    Upsample the input and concatenate it with the corresponding
    contracting-path (skip) tensor.
"""
def __init__(self, ch):
super(UpsampleCat, self).__init__()
self.up_conv = nn.Conv2d(ch, ch // 2, 3, padding=1)
self.up = nn.Upsample(scale_factor=2, mode='bilinear',
align_corners=True)
def forward(self, up, down):
up = self.up_conv(up)
up = self.up(up)
up_w, up_h = up.size()[2:4]
down_w, down_h = down.size()[2:4]
dw = down_w + 4 - up_w
dh = down_h + 4 - up_h
down = F.pad(down, (2, 2, 2, 2))
up = F.pad(up, (dw // 2, dw - dw // 2, dh // 2, dh - dh // 2))
y = torch.cat([down, up], dim=1)
return y
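# Worked example of the padding arithmetic (matching decoder stage 4 in the trace
# above): up is 244x244 after upsampling while down is 252x252, so
# dw = dh = 252 + 4 - 244 = 12; up is padded by 6 per side and down by 2, and both
# meet on a common 256x256 grid before the channel concat. (size()[2:4] is (H, W),
# so the up_w/up_h names are transposed, but all tensors here are square, so the
# result is unaffected.)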
class UNet(nn.Module):
"""
    UNet encoder-decoder model with unpadded 3x3 convolutions.
"""
def __init__(self, in_channels, out_channels):
super(UNet, self).__init__()
self.conv_1 = DoubleConv(in_channels, 64)
self.conv_2 = DoubleConv(64, 128)
self.conv_3 = DoubleConv(128, 256)
self.conv_4 = DoubleConv(256, 512)
self.conv_5 = DoubleConv(512, 1024)
self.down = nn.MaxPool2d(2)
self.up_1 = UpsampleCat(1024)
self.up_2 = UpsampleCat(512)
self.up_3 = UpsampleCat(256)
self.up_4 = UpsampleCat(128)
self.conv_6 = DoubleConv(1024, 512)
self.conv_7 = DoubleConv(512, 256)
self.conv_8 = DoubleConv(256, 128)
self.conv_9 = DoubleConv(128, 64)
self.out_conv = nn.Conv2d(64, out_channels, 1)
def forward(self, x):
x1 = self.conv_1(x)
x2 = self.conv_2(self.down(x1))
x3 = self.conv_3(self.down(x2))
x4 = self.conv_4(self.down(x3))
x = self.conv_5(self.down(x4))
x = self.conv_6(self.up_1(x, x4))
x = self.conv_7(self.up_2(x, x3))
x = self.conv_8(self.up_3(x, x2))
x = self.conv_9(self.up_4(x, x1))
x = F.pad(x, (2, 2, 2, 2))
x = self.out_conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 256, 256])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
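# Minimal smoke test (added sketch, not part of the generated dump): builds the
# model with the settings from get_init_inputs() and checks that a 256x256 input
# keeps its spatial size, as the compiled graph above asserts for its output buffer.
def _smoke_test():
    model = UNet(in_channels=4, out_channels=4)
    x, = get_inputs()
    y = model(x)
    assert y.shape == (4, 4, 256, 256)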
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16516096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64516 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 63504 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4064256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 126
x3 = xindex // 126
x2 = xindex // 15876
x4 = xindex % 15876
tmp0 = tl.load(in_ptr0 + (2 * x0 + 504 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 504 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (252 + 2 * x0 + 504 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (253 + 2 * x0 + 504 * x3), xmask,
eviction_policy='evict_last')
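    # 2x2 max pooling: tmp6 below is the window max and tmp16 an int8 code (0-3)
    # recording which of the four elements won, mirroring max_pool2d_with_indices.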
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 15904 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 16000 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 15376 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 14884 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1905152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 61
x3 = xindex // 61
x2 = xindex // 3721
x4 = xindex % 3721
tmp0 = tl.load(in_ptr0 + (2 * x0 + 244 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 244 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (122 + 2 * x0 + 244 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (123 + 2 * x0 + 244 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 3744 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 3840 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3564544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3481 % 256
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3326976
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3249 % 256
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 28
x1 = xindex // 28 % 28
x2 = xindex // 784
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 114 * x1 + 3249 * x2), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 114 * x1 + 3249 * x2), None,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (57 + 2 * x0 + 114 * x1 + 3249 * x2), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (58 + 2 * x0 + 114 * x1 + 3249 * x2), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 676 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 576 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 100 % 1024
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 1024
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 7, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_16(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
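# The three small kernels above precompute bilinear-upsampling lookup tables
# for the 8 -> 16 resize with align_corners=True: 0.4666... is (8-1)/(16-1),
# i.e. source = dst * (in-1)/(out-1). _to_copy_14 stores floor(source),
# add_clamp_15 stores the clamped neighbour floor(source)+1, and
# ..._mul_sub_16 stores the fractional interpolation weight in [0, 1]. The
# later _19/_24/_29 variants repeat the pattern for 24 -> 48, 57 -> 114 and
# 122 -> 244.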
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_17(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x5 = xindex // 256
x2 = xindex // 256 % 512
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 8 * tmp4 + 64 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 8 * tmp25 + 64 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 8 * tmp25 + 64 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tl.store(out_ptr0 + x6, tmp21, None)
tl.store(out_ptr1 + x6, tmp35, None)
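# Kernel 17 consumes those tables: for every output pixel it gathers the four
# neighbouring source pixels (in_ptr2 is the 8x8 up_conv output, in_ptr3 its
# bias), lerps horizontally with the x-weight in in_ptr5, then writes the
# top-row interpolation to out_ptr0 and the y-weighted vertical difference to
# out_ptr1; the two halves are summed later inside the cat kernel, completing
# the bilinear interpolation.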
@triton.jit
def triton_poi_fused_cat_18(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 784 % 1024
x1 = xindex // 28 % 28
x0 = xindex % 28
x3 = xindex // 802816
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 24, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = -2 + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + (-50 + x0 + 24 * x1 + 576 * x2 + 294912 * x3),
tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tl.full([1], 1024, tl.int64)
tmp22 = -6 + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 16, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = -6 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + (-102 + x0 + 16 * x1 + 256 * (-512 + x2) +
131072 * x3), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + (-102 + x0 + 16 * x1 + 256 * (-512 + x2) +
131072 * x3), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + x6, tmp40, None)
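# cat_18 fuses the skip connection: channels [0, 512) copy the 24x24 encoder
# map (buf21) into a 28x28 canvas with 2 pixels of zero padding, while
# channels [512, 1024) place the 16x16 upsampled decoder map (the sum of the
# two kernel-17 outputs) with 6 pixels of padding. This matches UpsampleCat's
# F.pad(down, (2, 2, 2, 2)) / F.pad(up, ...) followed by torch.cat.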
@triton.jit
def triton_poi_fused__to_copy_19(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.48936170212765956
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_20(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.48936170212765956
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 23, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_21(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.48936170212765956
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_22(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 48 % 48
x0 = xindex % 48
x5 = xindex // 2304
x2 = xindex // 2304 % 256
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 24, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 24 * tmp4 + 576 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 24 * tmp4 + 576 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 24 * tmp25 + 576 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 24 * tmp25 + 576 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tl.store(out_ptr0 + x6, tmp21, None)
tl.store(out_ptr1 + x6, tmp35, None)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 3721 % 512
x1 = xindex // 61 % 61
x0 = xindex % 61
x3 = xindex // 1905152
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 57, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = -2 + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + (-116 + x0 + 57 * x1 + 3249 * x2 + 831744 *
x3), tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp22 = -6 + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 48, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = -6 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + (-294 + x0 + 48 * x1 + 2304 * (-256 + x2) +
589824 * x3), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + (-294 + x0 + 48 * x1 + 2304 * (-256 + x2) +
589824 * x3), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + x6, tmp40, None)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 114
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49557522123893805
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 114
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49557522123893805
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 56, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_26(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 114
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49557522123893805
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_27(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 114 % 114
x0 = xindex % 114
x5 = xindex // 12996
x2 = xindex // 12996 % 128
x4 = xindex % 12996
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 57, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 57 * tmp4 + 3249 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 57 * tmp4 + 3249 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 57 * tmp25 + 3249 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 57 * tmp25 + 3249 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tl.store(out_ptr0 + (x4 + 13024 * x5), tmp21, None)
tl.store(out_ptr1 + (x4 + 13024 * x5), tmp35, None)
@triton.jit
def triton_poi_fused_cat_28(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 15876 % 256
x1 = xindex // 126 % 126
x0 = xindex % 126
x3 = xindex // 4064256
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 122, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = -2 + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + (-246 + x0 + 122 * x1 + 14884 * x2 + 1905152 *
x3), tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp22 = -6 + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 114, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = -6 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + (-690 + x0 + 114 * x1 + 13024 * (-128 + x2) +
1667072 * x3), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + (-690 + x0 + 114 * x1 + 13024 * (-128 + x2) +
1667072 * x3), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + x6, tmp40, None)
@triton.jit
def triton_poi_fused__to_copy_29(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 244
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49794238683127573
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_30(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 244
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49794238683127573
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 121, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_31(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 244
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49794238683127573
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_32(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 244 % 244
x0 = xindex % 244
x5 = xindex // 59536
x2 = xindex // 59536 % 64
x4 = xindex % 59536
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 122, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 122 * tmp4 + 14884 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 122 * tmp4 + 14884 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 122 * tmp25 + 14884 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 122 * tmp25 + 14884 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tl.store(out_ptr0 + (x4 + 59552 * x5), tmp21, None)
tl.store(out_ptr1 + (x4 + 59552 * x5), tmp35, None)
@triton.jit
def triton_poi_fused_cat_33(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 65536 % 128
x1 = xindex // 256 % 256
x0 = xindex % 256
x3 = xindex // 8388608
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 252, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = -2 + x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp9 < tmp7
tmp12 = tmp6 & tmp8
tmp13 = tmp12 & tmp10
tmp14 = tmp13 & tmp11
tmp15 = tmp14 & tmp4
tmp16 = tl.load(in_ptr0 + (-506 + x0 + 252 * x1 + 63504 * x2 + 4064256 *
x3), tmp15, other=0.0)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp22 = -6 + x1
tmp23 = tmp22 >= tmp1
tmp24 = tl.full([1], 244, tl.int64)
tmp25 = tmp22 < tmp24
tmp26 = -6 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp24
tmp29 = tmp23 & tmp25
tmp30 = tmp29 & tmp27
tmp31 = tmp30 & tmp28
tmp32 = tmp31 & tmp19
tmp33 = tl.load(in_ptr1 + (-1470 + x0 + 244 * x1 + 59552 * (-64 + x2) +
3811328 * x3), tmp32, other=0.0)
tmp34 = tl.load(in_ptr2 + (-1470 + x0 + 244 * x1 + 59552 * (-64 + x2) +
3811328 * x3), tmp32, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp19, tmp37, tmp38)
tmp40 = tl.where(tmp4, tmp18, tmp39)
tl.store(out_ptr0 + x6, tmp40, None)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_34(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x4 = xindex // 65536
x2 = xindex // 65536 % 64
x6 = xindex
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 252, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-506 + x0 + 252 * x1 + 63504 * x4), tmp10,
other=0.0)
tmp12 = tl.load(in_ptr1 + x2, tmp10, eviction_policy='evict_last',
other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + x6, tmp17, None)
@triton.jit
def triton_poi_fused_convolution_35(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_36(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 63504 % 64
x0 = xindex % 63504
x4 = xindex // 63504
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 63616 * x4), tmp6, None)
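# Kernel 36 contributes no forward math: it re-applies the conv bias + ReLU
# only to record the mask (activation <= 0) that autograd's
# threshold_backward uses to zero gradients through the last DoubleConv ReLU.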
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (512,), (1,))
assert_size_stride(primals_16, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (1024,), (1,))
assert_size_stride(primals_20, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_21, (1024,), (1,))
assert_size_stride(primals_22, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_29, (256,), (1,))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_33, (256,), (1,))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128,), (1,))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_43, (64,), (1,))
assert_size_stride(primals_44, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (64,), (1,))
assert_size_stride(primals_46, (4, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_47, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 254, 254), (4129024, 64516, 254, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16516096)](buf1, primals_2,
16516096, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 252, 252), (4064256, 63504, 252, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16257024)](buf3, primals_5,
16257024, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 126, 126), (1017856, 15904, 126,
1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 126, 126), (1024000, 16000, 126,
1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(4064256)](buf3,
buf4, buf5, 4064256, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 124, 124), (1968128, 15376, 124, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_3[grid(7872512)](buf7, primals_7,
7872512, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 122, 122), (1905152, 14884, 122, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(7620608)](buf9, primals_9,
7620608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 128, 61, 61), (479232, 3744, 61, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 61, 61), (491520, 3840, 61, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(1905152)](buf9,
buf10, buf11, 1905152, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 59, 59), (891136, 3481, 59, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_6[grid(3564544)](buf13,
primals_11, 3564544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 57, 57), (831744, 3249, 57, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_7[grid(3326976)](buf15,
primals_13, 3326976, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf16 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1),
torch.float32)
buf17 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(802816)](buf15,
buf16, buf17, 802816, XBLOCK=512, num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 512, 26, 26), (346112, 676, 26, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_9[grid(1384448)](buf19,
primals_15, 1384448, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 512, 24, 24), (294912, 576, 24, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_10[grid(1179648)](buf21,
primals_17, 1179648, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf22 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1),
torch.float32)
buf23 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_11[grid(294912)](buf21,
buf22, buf23, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 1024, 10, 10), (102400, 100, 10, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_12[grid(409600)](buf25,
primals_19, 409600, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 1024, 8, 8), (65536, 64, 8, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_13[grid(262144)](buf27,
primals_21, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf28 = extern_kernels.convolution(buf27, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 8, 8), (32768, 64, 8, 1))
buf29 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_14[grid(16)](buf29, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_15[grid(16)](buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_14[grid(16)](buf31, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused_add_clamp_15[grid(16)](buf32, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf33 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_16[grid(16)](buf33,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf35 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_16[grid(16)](buf35,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 512, 16, 16), (131072, 256, 16, 1),
torch.float32)
buf36 = empty_strided_cuda((4, 512, 16, 16), (131072, 256, 16, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_sub_17[grid(524288)
](buf29, buf31, buf28, primals_23, buf32, buf33, buf30, buf35,
buf34, buf36, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
del buf28
del primals_23
buf37 = empty_strided_cuda((4, 1024, 28, 28), (802816, 784, 28, 1),
torch.float32)
triton_poi_fused_cat_18[grid(3211264)](buf21, buf34, buf36, buf37,
3211264, XBLOCK=512, num_warps=8, num_stages=1)
del buf34
del buf36
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 26, 26), (346112, 676, 26, 1))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_9[grid(1384448)](buf39,
primals_25, 1384448, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf40 = extern_kernels.convolution(buf39, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 24, 24), (294912, 576, 24, 1))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_10[grid(1179648)](buf41,
primals_27, 1179648, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf42 = extern_kernels.convolution(buf41, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 256, 24, 24), (147456, 576, 24, 1))
buf43 = empty_strided_cuda((48, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_19[grid(48)](buf43, 48, XBLOCK=64,
num_warps=1, num_stages=1)
buf44 = empty_strided_cuda((48, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_20[grid(48)](buf44, 48, XBLOCK=64,
num_warps=1, num_stages=1)
buf45 = empty_strided_cuda((48,), (1,), torch.int64)
triton_poi_fused__to_copy_19[grid(48)](buf45, 48, XBLOCK=64,
num_warps=1, num_stages=1)
buf46 = empty_strided_cuda((48,), (1,), torch.int64)
triton_poi_fused_add_clamp_20[grid(48)](buf46, 48, XBLOCK=64,
num_warps=1, num_stages=1)
buf47 = empty_strided_cuda((48,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_21[grid(48)](buf47,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf49 = empty_strided_cuda((48, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_21[grid(48)](buf49,
48, XBLOCK=64, num_warps=1, num_stages=1)
buf48 = empty_strided_cuda((4, 256, 48, 48), (589824, 2304, 48, 1),
torch.float32)
buf50 = empty_strided_cuda((4, 256, 48, 48), (589824, 2304, 48, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_sub_22[grid(2359296)
](buf43, buf45, buf42, primals_29, buf46, buf47, buf44, buf49,
buf48, buf50, 2359296, XBLOCK=512, num_warps=8, num_stages=1)
del buf42
del primals_29
buf51 = empty_strided_cuda((4, 512, 61, 61), (1905152, 3721, 61, 1),
torch.float32)
triton_poi_fused_cat_23[grid(7620608)](buf15, buf48, buf50, buf51,
7620608, XBLOCK=1024, num_warps=4, num_stages=1)
del buf48
del buf50
buf52 = extern_kernels.convolution(buf51, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 256, 59, 59), (891136, 3481, 59, 1))
buf53 = buf52
del buf52
triton_poi_fused_convolution_relu_6[grid(3564544)](buf53,
primals_31, 3564544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf54 = extern_kernels.convolution(buf53, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 256, 57, 57), (831744, 3249, 57, 1))
buf55 = buf54
del buf54
triton_poi_fused_convolution_relu_7[grid(3326976)](buf55,
primals_33, 3326976, XBLOCK=512, num_warps=8, num_stages=1)
del primals_33
buf56 = extern_kernels.convolution(buf55, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 57, 57), (415872, 3249, 57, 1))
buf57 = empty_strided_cuda((114, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_24[grid(114)](buf57, 114, XBLOCK=128,
num_warps=4, num_stages=1)
buf58 = empty_strided_cuda((114, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_25[grid(114)](buf58, 114, XBLOCK=128,
num_warps=4, num_stages=1)
buf59 = empty_strided_cuda((114,), (1,), torch.int64)
triton_poi_fused__to_copy_24[grid(114)](buf59, 114, XBLOCK=128,
num_warps=4, num_stages=1)
buf60 = empty_strided_cuda((114,), (1,), torch.int64)
triton_poi_fused_add_clamp_25[grid(114)](buf60, 114, XBLOCK=128,
num_warps=4, num_stages=1)
buf61 = empty_strided_cuda((114,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_26[grid(114)](buf61,
114, XBLOCK=128, num_warps=4, num_stages=1)
buf63 = empty_strided_cuda((114, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_26[grid(114)](buf63,
114, XBLOCK=128, num_warps=4, num_stages=1)
buf62 = empty_strided_cuda((4, 128, 114, 114), (1667072, 13024, 114,
1), torch.float32)
buf64 = empty_strided_cuda((4, 128, 114, 114), (1667072, 13024, 114,
1), torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_sub_27[grid(6653952)
](buf57, buf59, buf56, primals_35, buf60, buf61, buf58, buf63,
buf62, buf64, 6653952, XBLOCK=512, num_warps=8, num_stages=1)
del buf56
del primals_35
buf65 = empty_strided_cuda((4, 256, 126, 126), (4064256, 15876, 126,
1), torch.float32)
triton_poi_fused_cat_28[grid(16257024)](buf9, buf62, buf64, buf65,
16257024, XBLOCK=1024, num_warps=4, num_stages=1)
del buf62
del buf64
buf66 = extern_kernels.convolution(buf65, primals_36, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 124, 124), (1968128, 15376, 124, 1))
buf67 = buf66
del buf66
triton_poi_fused_convolution_relu_3[grid(7872512)](buf67,
primals_37, 7872512, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_37
buf68 = extern_kernels.convolution(buf67, primals_38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 122, 122), (1905152, 14884, 122, 1))
buf69 = buf68
del buf68
triton_poi_fused_convolution_relu_4[grid(7620608)](buf69,
primals_39, 7620608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_39
buf70 = extern_kernels.convolution(buf69, primals_40, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 64, 122, 122), (952576, 14884, 122, 1))
buf71 = empty_strided_cuda((244, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_29[grid(244)](buf71, 244, XBLOCK=128,
num_warps=4, num_stages=1)
buf72 = empty_strided_cuda((244, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_30[grid(244)](buf72, 244, XBLOCK=128,
num_warps=4, num_stages=1)
buf73 = empty_strided_cuda((244,), (1,), torch.int64)
triton_poi_fused__to_copy_29[grid(244)](buf73, 244, XBLOCK=128,
num_warps=4, num_stages=1)
buf74 = empty_strided_cuda((244,), (1,), torch.int64)
triton_poi_fused_add_clamp_30[grid(244)](buf74, 244, XBLOCK=128,
num_warps=4, num_stages=1)
buf75 = empty_strided_cuda((244,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_31[grid(244)](buf75,
244, XBLOCK=256, num_warps=4, num_stages=1)
buf77 = empty_strided_cuda((244, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_31[grid(244)](buf77,
244, XBLOCK=256, num_warps=4, num_stages=1)
buf76 = empty_strided_cuda((4, 64, 244, 244), (3811328, 59552, 244,
1), torch.float32)
buf78 = empty_strided_cuda((4, 64, 244, 244), (3811328, 59552, 244,
1), torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_sub_32[grid(
15241216)](buf71, buf73, buf70, primals_41, buf74, buf75, buf72,
buf77, buf76, buf78, 15241216, XBLOCK=512, num_warps=8,
num_stages=1)
del buf70
del primals_41
buf79 = empty_strided_cuda((4, 128, 256, 256), (8388608, 65536, 256,
1), torch.float32)
triton_poi_fused_cat_33[grid(33554432)](buf3, buf76, buf78, buf79,
33554432, XBLOCK=1024, num_warps=4, num_stages=1)
del buf76
del buf78
buf80 = extern_kernels.convolution(buf79, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 64, 254, 254), (4129024, 64516, 254, 1))
buf81 = buf80
del buf80
triton_poi_fused_convolution_relu_0[grid(16516096)](buf81,
primals_43, 16516096, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf82 = extern_kernels.convolution(buf81, primals_44, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 64, 252, 252), (4064256, 63504, 252, 1))
buf83 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256,
1), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_34[grid(16777216)](
buf82, primals_45, buf83, 16777216, XBLOCK=1024, num_warps=4,
num_stages=1)
buf84 = extern_kernels.convolution(buf83, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf85 = buf84
del buf84
triton_poi_fused_convolution_35[grid(1048576)](buf85, primals_47,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_47
buf86 = empty_strided_cuda((4, 64, 252, 252), (4071424, 63616, 252,
1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_36[grid(16257024)
](buf82, primals_45, buf86, 16257024, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf82
del primals_45
return (buf85, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, buf1, buf3, buf4,
buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19,
buf21, buf22, buf23, buf25, buf27, buf29, buf30, buf31, buf32,
buf33, buf35, buf37, buf39, buf41, buf43, buf44, buf45, buf46,
buf47, buf49, buf51, buf53, buf55, buf57, buf58, buf59, buf60,
buf61, buf63, buf65, buf67, buf69, buf71, buf72, buf73, buf74,
buf75, buf77, buf79, buf81, buf83, buf86)
class DoubleConv(nn.Module):
"""
    Two unpadded 3x3 convolutions, each followed by a ReLU
"""
def __init__(self, in_channels, out_channels):
super(DoubleConv, self).__init__()
self.conv_1 = nn.Conv2d(in_channels, out_channels, 3)
self.conv_2 = nn.Conv2d(out_channels, out_channels, 3)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv_1(x)
x = self.relu(x)
x = self.conv_2(x)
x = self.relu(x)
return x
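# Shape check (illustrative): two unpadded 3x3 convs shrink each spatial dim
# by 4 in total, e.g. DoubleConv(4, 64) maps a (N, 4, 256, 256) input to
# (N, 64, 252, 252), which is why the asserts in call() expect 254 then 252.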
class UpsampleCat(nn.Module):
"""
    Upsample input and concatenate it with the matching tensor from the contracting path
"""
def __init__(self, ch):
super(UpsampleCat, self).__init__()
self.up_conv = nn.Conv2d(ch, ch // 2, 3, padding=1)
self.up = nn.Upsample(scale_factor=2, mode='bilinear',
align_corners=True)
def forward(self, up, down):
up = self.up_conv(up)
up = self.up(up)
up_w, up_h = up.size()[2:4]
down_w, down_h = down.size()[2:4]
dw = down_w + 4 - up_w
dh = down_h + 4 - up_h
down = F.pad(down, (2, 2, 2, 2))
up = F.pad(up, (dw // 2, dw - dw // 2, dh // 2, dh - dh // 2))
y = torch.cat([down, up], dim=1)
return y
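# Padding arithmetic (illustrative): with up at 8x8 -> up_conv -> 8x8 ->
# bilinear x2 -> 16x16 and down at 24x24, dw = dh = 24 + 4 - 16 = 12, so down
# is padded to 28x28 and up by 6 per side to 28x28 before the channel concat.
# Note the (w, h) names actually hold the (H, W) slices of size()[2:4]; that
# is harmless here because every feature map in this network is square.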
class UNetNew(nn.Module):
"""
UNet model
"""
def __init__(self, in_channels, out_channels):
super(UNetNew, self).__init__()
self.conv_1 = DoubleConv(in_channels, 64)
self.conv_2 = DoubleConv(64, 128)
self.conv_3 = DoubleConv(128, 256)
self.conv_4 = DoubleConv(256, 512)
self.conv_5 = DoubleConv(512, 1024)
self.down = nn.MaxPool2d(2)
self.up_1 = UpsampleCat(1024)
self.up_2 = UpsampleCat(512)
self.up_3 = UpsampleCat(256)
self.up_4 = UpsampleCat(128)
self.conv_6 = DoubleConv(1024, 512)
self.conv_7 = DoubleConv(512, 256)
self.conv_8 = DoubleConv(256, 128)
self.conv_9 = DoubleConv(128, 64)
self.out_conv = nn.Conv2d(64, out_channels, 1)
def forward(self, input_0):
primals_1 = self.conv_1.conv_1.weight
primals_2 = self.conv_1.conv_1.bias
primals_4 = self.conv_1.conv_2.weight
primals_5 = self.conv_1.conv_2.bias
primals_6 = self.conv_2.conv_1.weight
primals_7 = self.conv_2.conv_1.bias
primals_8 = self.conv_2.conv_2.weight
primals_9 = self.conv_2.conv_2.bias
primals_10 = self.conv_3.conv_1.weight
primals_11 = self.conv_3.conv_1.bias
primals_12 = self.conv_3.conv_2.weight
primals_13 = self.conv_3.conv_2.bias
primals_14 = self.conv_4.conv_1.weight
primals_15 = self.conv_4.conv_1.bias
primals_16 = self.conv_4.conv_2.weight
primals_17 = self.conv_4.conv_2.bias
primals_18 = self.conv_5.conv_1.weight
primals_19 = self.conv_5.conv_1.bias
primals_20 = self.conv_5.conv_2.weight
primals_21 = self.conv_5.conv_2.bias
primals_22 = self.up_1.up_conv.weight
primals_23 = self.up_1.up_conv.bias
primals_28 = self.up_2.up_conv.weight
primals_29 = self.up_2.up_conv.bias
primals_34 = self.up_3.up_conv.weight
primals_35 = self.up_3.up_conv.bias
primals_40 = self.up_4.up_conv.weight
primals_41 = self.up_4.up_conv.bias
primals_24 = self.conv_6.conv_1.weight
primals_25 = self.conv_6.conv_1.bias
primals_26 = self.conv_6.conv_2.weight
primals_27 = self.conv_6.conv_2.bias
primals_30 = self.conv_7.conv_1.weight
primals_31 = self.conv_7.conv_1.bias
primals_32 = self.conv_7.conv_2.weight
primals_33 = self.conv_7.conv_2.bias
primals_36 = self.conv_8.conv_1.weight
primals_37 = self.conv_8.conv_1.bias
primals_38 = self.conv_8.conv_2.weight
primals_39 = self.conv_8.conv_2.bias
primals_42 = self.conv_9.conv_1.weight
primals_43 = self.conv_9.conv_1.bias
primals_44 = self.conv_9.conv_2.weight
primals_45 = self.conv_9.conv_2.bias
primals_46 = self.out_conv.weight
primals_47 = self.out_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0]
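# Minimal usage sketch (an assumption, not part of the generated module: it
# presumes a CUDA device, since the compiled kernels are CUDA-only, and a
# 256x256 input to satisfy the size asserts in call()):
#
#   model = UNetNew(in_channels=4, out_channels=4).cuda()
#   x = torch.rand(4, 4, 256, 256, device='cuda')
#   y = model(x)  # (4, 4, 256, 256) output logits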
|
Aoi-hosizora/UNet-pytorch
|
UNet
| false | 9,244 |
[
"MIT"
] | 0 |
96951d5d1fdc6c6266a11e1bd97fbf72010bc87d
|
https://github.com/Aoi-hosizora/UNet-pytorch/tree/96951d5d1fdc6c6266a11e1bd97fbf72010bc87d
|
FirstOctaveConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/l3/cl3qgtljwm55hj7prrlq32vnxhqj5elf2qeptwkrprrhumnm7twn.py
# Topologically Sorted Source Nodes: [X_h2l], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# X_h2l => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [2, 2], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (2, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [X_h2l], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [X_h], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 3, 3), (18, 9, 3, 1))
# Topologically Sorted Source Nodes: [X_l], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 1, 1), (2, 1, 1, 1))
return (buf1, buf2, primals_1, primals_2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
from torch.nn import init as init
class FirstOctaveConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, alpha=0.5,
stride=1, padding=1, dilation=1, groups=1, bias=False):
super(FirstOctaveConv, self).__init__()
self.stride = stride
self.h2g_pool = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
self.h2l = torch.nn.Conv2d(in_channels, int(alpha * out_channels),
kernel_size, 1, padding, dilation, groups, bias)
self.h2h = torch.nn.Conv2d(in_channels, out_channels - int(alpha *
out_channels), kernel_size, 1, padding, dilation, groups, bias)
def forward(self, x):
if self.stride == 2:
x = self.h2g_pool(x)
X_h2l = self.h2g_pool(x)
X_h = x
X_h = self.h2h(X_h)
X_l = self.h2l(X_h2l)
return X_h, X_l
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
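# Hedged usage sketch (not part of the original repo): with the default
# alpha=0.5 the module emits a high-frequency map at full resolution and a
# low-frequency map computed from a 2x average-pooled copy of the input.
# Shapes follow get_inputs()/get_init_inputs() above.
def _example_first_octave_conv():
    conv = FirstOctaveConv(in_channels=4, out_channels=4, kernel_size=4)
    X_h, X_l = conv(torch.rand(4, 4, 4, 4))
    assert X_h.shape == (4, 2, 3, 3)  # h2h branch on the full-resolution input
    assert X_l.shape == (4, 2, 1, 1)  # h2l branch on the pooled input
    return X_h, X_l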
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
from torch.nn import init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (2, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
    triton_poi_fused_avg_pool2d_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 3, 3), (18, 9, 3, 1))
buf2 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 1, 1), (2, 1, 1, 1))
return buf1, buf2, primals_1, primals_2, primals_3, buf0
class FirstOctaveConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, alpha=0.5,
stride=1, padding=1, dilation=1, groups=1, bias=False):
super(FirstOctaveConvNew, self).__init__()
self.stride = stride
self.h2g_pool = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
self.h2l = torch.nn.Conv2d(in_channels, int(alpha * out_channels),
kernel_size, 1, padding, dilation, groups, bias)
self.h2h = torch.nn.Conv2d(in_channels, out_channels - int(alpha *
out_channels), kernel_size, 1, padding, dilation, groups, bias)
def forward(self, input_0):
primals_2 = self.h2l.weight
primals_3 = self.h2h.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
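# Annotation sketch (added, not generated code): triton_poi_fused_avg_pool2d_0
# above is plain 2x2/stride-2 average pooling, i.e. each output element is the
# mean of one 2x2 input window. A minimal CPU check of that identity:
def _check_avg_pool_identity():
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4)
    manual = (x[..., ::2, ::2] + x[..., ::2, 1::2] +
              x[..., 1::2, ::2] + x[..., 1::2, 1::2]) * 0.25
    assert torch.allclose(manual, F.avg_pool2d(x, kernel_size=2, stride=2))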
|
IlikeBB/Object-Detection-for-M-NBI | FirstOctaveConv | false | 9,245 | ["MIT"] | 0 | 650fa1ca7b8860785f0a838dab0301a9cba121d6 | https://github.com/IlikeBB/Object-Detection-for-M-NBI/tree/650fa1ca7b8860785f0a838dab0301a9cba121d6 |
SurfaceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/jx/cjxj2qzhq52dbtnw5atwmpqodngntrpfjymsuzn66wchnycrupk5.py
# Topologically Sorted Source Nodes: [score, score_1], Original ATen: [aten.mul, aten.mean]
# Source node to ATen node mapping:
# score => mul
# score_1 => mean
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [2]), kwargs = {})
triton_per_fused_mean_mul_1 = async_compile.triton('triton_per_fused_mean_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (16 + r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (32 + r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (48 + r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (r2 + (16*x3)), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/cu/ccuetxmqhlj73f2voq2wofh7jzpy4ldr3ziv4davdo2znmbts5pz.py
# Topologically Sorted Source Nodes: [score, score_1, score_2], Original ATen: [aten.mul, aten.mean]
# Source node to ATen node mapping:
# score => mul
# score_1 => mean
# score_2 => mean_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [2]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [1]), kwargs = {})
triton_poi_fused_mean_mul_2 = async_compile.triton('triton_poi_fused_mean_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 / tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 / tmp1
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score, score_1], Original ATen: [aten.mul, aten.mean]
triton_per_fused_mean_mul_1.run(buf0, arg1_1, buf1, 16, 16, grid=grid(16), stream=stream0)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [score, score_1, score_2], Original ATen: [aten.mul, aten.mean]
triton_poi_fused_mean_mul_2.run(buf1, buf2, 4, grid=grid(4), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class SurfaceLoss(nn.Module):
def __init__(self, epsilon=1e-05, softmax=True):
super(SurfaceLoss, self).__init__()
self.weight_map = []
def forward(self, x, distmap):
x = torch.softmax(x, dim=1)
self.weight_map = distmap
score = x.flatten(start_dim=2) * distmap.flatten(start_dim=2)
score = torch.mean(score, dim=2)
score = torch.mean(score, dim=1)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
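# Hedged equivalence sketch (not from the original repo): the loss is the
# per-sample mean over pixels, then over channels, of softmax(x) * distmap,
# so the module returns one scalar per batch element.
def _check_surface_loss():
    x, d = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    ref = (torch.softmax(x, dim=1) * d).flatten(start_dim=2).mean(dim=2).mean(dim=1)
    out = SurfaceLoss()(x, d)
    assert out.shape == (4,) and torch.allclose(out, ref)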
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
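# Annotation (added comment, not generated code): the kernel above computes the
# numerically stable softmax numerator exp(x - max(x)) across the four channel
# slots (offsets 0, 16, 32, 48 per sample); the division by the channel-wise
# sum is fused into triton_per_fused_mean_mul_1 below (tmp8 = tmp0 / tmp7).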
@triton.jit
def triton_per_fused_mean_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (16 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (32 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (48 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x3, tmp14, xmask)
@triton.jit
def triton_poi_fused_mean_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 / tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 / tmp1
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
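# Annotation (added comment): the two torch.mean calls are split across two
# kernels. triton_per_fused_mean_mul_1 produces per-(sample, channel) sums
# over the 16 pixels; this kernel divides by 16.0 (pixel mean) and by 4.0
# (channel mean) to finish both reductions.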
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
    triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_mean_mul_1[grid(16)](buf0, arg1_1, buf1, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mean_mul_2[grid(4)](buf1, buf2, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf1
return buf2,
class SurfaceLossNew(nn.Module):
def __init__(self, epsilon=1e-05, softmax=True):
super(SurfaceLossNew, self).__init__()
self.weight_map = []
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
KamranBinaee/RGnet | SurfaceLoss | false | 9,246 | ["MIT"] | 0 | 85861ab47a94018c8f8fa01fb7e64d8eec7fdc43 | https://github.com/KamranBinaee/RGnet/tree/85861ab47a94018c8f8fa01fb7e64d8eec7fdc43 |
MLP
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/bd/cbdotac6ukup5jfyef3gol4xzuff4mzk4u5pqfhzizchbce25ivd.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x *= mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLP(nn.Module):
"""
Applies a linear transformation together with :class:`~torch.nn.LeakyReLU` activation to the incoming tensor:
:math:`y = \\mathrm{LeakyReLU}(x A^T + b)`
Args:
n_in (~torch.Tensor):
The size of each input feature.
n_out (~torch.Tensor):
The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
"""
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1)
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
"""
Args:
x (~torch.Tensor):
The size of each input feature is `n_in`.
Returns:
A tensor with the size of each output feature `n_out`.
"""
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
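# Hedged usage sketch (not from the original repo): the forward pass is
# LeakyReLU(x @ W.T + b) with slope 0.1, and reset_parameters() makes the
# square weight matrix orthogonal, so W @ W.T is close to the identity.
def _example_mlp():
    mlp = MLP(n_in=4, n_out=4)
    y = mlp(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 4, 4)
    W = mlp.linear.weight
    assert torch.allclose(W @ W.T, torch.eye(4), atol=1e-5)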
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x *= mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLPNew(nn.Module):
"""
Applies a linear transformation together with :class:`~torch.nn.LeakyReLU` activation to the incoming tensor:
:math:`y = \\mathrm{LeakyReLU}(x A^T + b)`
Args:
n_in (~torch.Tensor):
The size of each input feature.
n_out (~torch.Tensor):
The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
"""
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1)
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
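# Annotation (added comment): this record is the '0_forward' AOT graph, so
# call() also returns the reshaped input and the boolean x > 0 mask (buf1),
# which are saved for the backward pass; MLPNew.forward surfaces only
# output[0], the activated tensor.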
|
KoichiYasuoka/diaparser | MLP | false | 9,247 | ["MIT"] | 0 | ca11e65ef890cee2fbb23f42ae9c711c89767158 | https://github.com/KoichiYasuoka/diaparser/tree/ca11e65ef890cee2fbb23f42ae9c711c89767158 |
DiceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/vh/cvhcrsxucgh7eot2p772apvh6wg7qihujnij7ewp3yqeqgpnmix6.py
# Topologically Sorted Source Nodes: [input_1, min_1, ne, mask, mul, sum_1, max_1, mul_1, sum_2, clamp, truediv, sub], Original ATen: [aten.sigmoid, aten.minimum, aten.ne, aten._to_copy, aten.mul, aten.sum, aten.maximum, aten.clamp, aten.div, aten.rsub]
# Source node to ATen node mapping:
# clamp => clamp_min
# input_1 => sigmoid
# mask => convert_element_type
# max_1 => maximum
# min_1 => minimum
# mul => mul
# mul_1 => mul_1
# ne => ne
# sub => sub
# sum_1 => sum_1
# sum_2 => sum_2
# truediv => div
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view,), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%sigmoid, %view_1), kwargs = {})
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%view_1, -1), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum, %convert_element_type), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%sigmoid, %view_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%maximum, %convert_element_type), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_2, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %clamp_min), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = triton_helpers.minimum(tmp1, tmp2)
tmp4 = -1.0
tmp5 = tmp2 != tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = triton_helpers.maximum(tmp1, tmp2)
tmp12 = tmp11 * tmp6
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp10 / tmp17
tmp19 = tmp16 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_1, min_1, ne, mask, mul, sum_1, max_1, mul_1, sum_2, clamp, truediv, sub], Original ATen: [aten.sigmoid, aten.minimum, aten.ne, aten._to_copy, aten.mul, aten.sum, aten.maximum, aten.clamp, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, ignore_target=-1):
super().__init__()
self.ignore_target = ignore_target
def forward(self, input, target):
"""
:param input: (N), logit
:param target: (N), {0, 1}
:return:
"""
input = torch.sigmoid(input.view(-1))
target = target.float().view(-1)
mask = (target != self.ignore_target).float()
return 1.0 - (torch.min(input, target) * mask).sum() / torch.clamp((
torch.max(input, target) * mask).sum(), min=1.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
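# Hedged note (not from the original repo): for hard targets t in {0, 1} and
# probabilities p in [0, 1], min(p, t) = p*t and max(p, t) = p + t - p*t, so
# this loss is the soft-IoU loss 1 - intersection/union over non-ignored
# elements. A quick check of those identities:
def _check_dice_identities():
    p = torch.sigmoid(torch.randn(16))
    t = torch.randint(0, 2, (16,)).float()
    assert torch.allclose(torch.min(p, t), p * t)
    assert torch.allclose(torch.max(p, t), p + t - p * t)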
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = triton_helpers.minimum(tmp1, tmp2)
tmp4 = -1.0
tmp5 = tmp2 != tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = triton_helpers.maximum(tmp1, tmp2)
tmp12 = tmp11 * tmp6
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp10 / tmp17
tmp19 = tmp16 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_clamp_div_maximum_minimum_mul_ne_rsub_sigmoid_sum_0[
grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class DiceLossNew(nn.Module):
def __init__(self, ignore_target=-1):
super().__init__()
self.ignore_target = ignore_target
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
LorenzLamm/Pointnet2.PyTorch | DiceLoss | false | 9,248 | ["MIT"] | 0 | d15862b282c93cedbc08ea14622793f66429af21 | https://github.com/LorenzLamm/Pointnet2.PyTorch/tree/d15862b282c93cedbc08ea14622793f66429af21 |
ChamferLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/v7/cv7duh3vx2zbvzgykpxoo3zrorbpe24v6abbzgskhj2f2lhowmlt.py
# Topologically Sorted Source Nodes: [add, mul, P, min_1, min_2, loss_1, loss_2, add_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.min, aten.sum]
# Source node to ATen node mapping:
# P => sub
# add => add
# add_1 => add_1
# loss_1 => sum_1
# loss_2 => sum_2
# min_1 => min_1
# min_2 => min_2
# mul => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_4, %expand_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm_2, 2), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%sub, 1), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%getitem,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%getitem_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_min_mul_sub_sum_0 = async_compile.triton('triton_per_fused_add_min_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_min_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_min_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 4)
r0 = rindex % 4
r2 = rindex
tmp0 = tl.load(in_ptr0 + (16*r1), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((5*r0) + (16*r1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (r0 + (16*r1)), None)
tmp7 = tl.load(in_ptr0 + (5 + (16*r1)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + r0 + (16*r1)), None)
tmp13 = tl.load(in_ptr0 + (10 + (16*r1)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + r0 + (16*r1)), None)
tmp19 = tl.load(in_ptr0 + (15 + (16*r1)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (12 + r0 + (16*r1)), None)
tmp28 = tl.load(in_ptr0 + ((5*r0) + (16*r1)), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (16*r1), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (4*r2), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr1 + (5 + (16*r1)), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (1 + (4*r2)), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr1 + (10 + (16*r1)), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr2 + (2 + (4*r2)), None, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr1 + (15 + (16*r1)), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr2 + (3 + (4*r2)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 * tmp4
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.minimum(tmp6, tmp11)
tmp14 = tmp13 + tmp1
tmp16 = tmp15 * tmp4
tmp17 = tmp14 - tmp16
tmp18 = triton_helpers.minimum(tmp12, tmp17)
tmp20 = tmp19 + tmp1
tmp22 = tmp21 * tmp4
tmp23 = tmp20 - tmp22
tmp24 = triton_helpers.minimum(tmp18, tmp23)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp30 = tmp28 + tmp29
tmp32 = tmp31 * tmp4
tmp33 = tmp30 - tmp32
tmp35 = tmp28 + tmp34
tmp37 = tmp36 * tmp4
tmp38 = tmp35 - tmp37
tmp39 = triton_helpers.minimum(tmp33, tmp38)
tmp41 = tmp28 + tmp40
tmp43 = tmp42 * tmp4
tmp44 = tmp41 - tmp43
tmp45 = triton_helpers.minimum(tmp39, tmp44)
tmp47 = tmp28 + tmp46
tmp49 = tmp48 * tmp4
tmp50 = tmp47 - tmp49
tmp51 = triton_helpers.minimum(tmp45, tmp50)
tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK])
tmp54 = tl.sum(tmp52, 1)[:, None]
tmp55 = tmp27 + tmp54
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp55, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [xx], Original ATen: [aten.bmm]
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [yy], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [zz], Original ATen: [aten.bmm]
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
del arg0_1
del arg1_1
buf5 = empty_strided_cuda((), (), torch.float32)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [add, mul, P, min_1, min_2, loss_1, loss_2, add_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.min, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_min_mul_sub_sum_0.run(buf7, buf0, buf1, buf2, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class ChamferLoss(nn.Module):
"""
Torch implementation of chamferLoss for n-dimensional geometries
"""
def __init__(self):
        super(ChamferLoss, self).__init__()
self.use_cuda = torch.cuda.is_available()
def batch_pairwise_dist(self, x, y):
_bs, num_points_x, _points_dim = x.size()
_, num_points_y, _ = y.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
        # Create the diagonal index tensors on the inputs' device; the
        # original if-branch here was a no-op, apparently intended to move
        # them to the GPU.
        diag_ind_x = torch.arange(0, num_points_x, device=x.device)
        diag_ind_y = torch.arange(0, num_points_y, device=y.device)
rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz.
transpose(2, 1))
ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)
P = rx.transpose(2, 1) + ry - 2 * zz
return P
def forward(self, preds, gts):
P = self.batch_pairwise_dist(gts, preds)
mins, _ = torch.min(P, 1)
loss_1 = torch.sum(mins)
mins, _ = torch.min(P, 2)
loss_2 = torch.sum(mins)
return loss_1 + loss_2
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
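# Hedged check (not from the original repo): batch_pairwise_dist expands
# ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 <x_i, y_j>, so it should agree
# with torch.cdist squared up to floating-point error.
def _check_pairwise_dist():
    cl = ChamferLoss()
    x, y = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    P = cl.batch_pairwise_dist(x, y)
    assert torch.allclose(P, torch.cdist(x, y) ** 2, atol=1e-5)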
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_min_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4
r0 = rindex % 4
r2 = rindex
tmp0 = tl.load(in_ptr0 + 16 * r1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (5 * r0 + 16 * r1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (r0 + 16 * r1), None)
tmp7 = tl.load(in_ptr0 + (5 + 16 * r1), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + r0 + 16 * r1), None)
tmp13 = tl.load(in_ptr0 + (10 + 16 * r1), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr2 + (8 + r0 + 16 * r1), None)
tmp19 = tl.load(in_ptr0 + (15 + 16 * r1), None, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr2 + (12 + r0 + 16 * r1), None)
tmp28 = tl.load(in_ptr0 + (5 * r0 + 16 * r1), None, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr1 + 16 * r1, None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + 4 * r2, None, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr1 + (5 + 16 * r1), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (1 + 4 * r2), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr1 + (10 + 16 * r1), None, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr2 + (2 + 4 * r2), None, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr1 + (15 + 16 * r1), None, eviction_policy=
'evict_last')
tmp48 = tl.load(in_ptr2 + (3 + 4 * r2), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 * tmp4
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.minimum(tmp6, tmp11)
tmp14 = tmp13 + tmp1
tmp16 = tmp15 * tmp4
tmp17 = tmp14 - tmp16
tmp18 = triton_helpers.minimum(tmp12, tmp17)
tmp20 = tmp19 + tmp1
tmp22 = tmp21 * tmp4
tmp23 = tmp20 - tmp22
tmp24 = triton_helpers.minimum(tmp18, tmp23)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp30 = tmp28 + tmp29
tmp32 = tmp31 * tmp4
tmp33 = tmp30 - tmp32
tmp35 = tmp28 + tmp34
tmp37 = tmp36 * tmp4
tmp38 = tmp35 - tmp37
tmp39 = triton_helpers.minimum(tmp33, tmp38)
tmp41 = tmp28 + tmp40
tmp43 = tmp42 * tmp4
tmp44 = tmp41 - tmp43
tmp45 = triton_helpers.minimum(tmp39, tmp44)
tmp47 = tmp28 + tmp46
tmp49 = tmp48 * tmp4
tmp50 = tmp47 - tmp49
tmp51 = triton_helpers.minimum(tmp45, tmp50)
tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK])
tmp54 = tl.sum(tmp52, 1)[:, None]
tmp55 = tmp27 + tmp54
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp55, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg1_1, (4, 4, 4), (
16, 1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (
16, 1, 4), 0), out=buf2)
del arg0_1
del arg1_1
buf5 = empty_strided_cuda((), (), torch.float32)
buf7 = buf5
del buf5
get_raw_stream(0)
triton_per_fused_add_min_mul_sub_sum_0[grid(1)](buf7, buf0, buf1,
buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf7,
class ChamferLossNew(nn.Module):
"""
    Torch implementation of the Chamfer loss for n-dimensional geometries.
"""
def __init__(self):
        super(ChamferLossNew, self).__init__()
self.use_cuda = torch.cuda.is_available()
def batch_pairwise_dist(self, x, y):
_bs, num_points_x, _points_dim = x.size()
_, num_points_y, _ = y.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
diag_ind_x = torch.arange(0, num_points_x)
diag_ind_y = torch.arange(0, num_points_y)
        if x.get_device() != -1:  # inputs live on a CUDA device
            diag_ind_x = diag_ind_x.to(x.device)
            diag_ind_y = diag_ind_y.to(x.device)
rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz.
transpose(2, 1))
ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)
P = rx.transpose(2, 1) + ry - 2 * zz
return P
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
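# Hedged parity sketch (added; assumes a CUDA device): the Chamfer total is
# symmetric in its two directional terms, so the compiled module can be
# checked against a brute-force cdist reference without pinning down the
# argument order.
def _check_compiled_chamfer():
    a = torch.rand(4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, device='cuda')
    P = torch.cdist(a, b) ** 2
    ref = P.min(1).values.sum() + P.min(2).values.sum()
    torch.testing.assert_close(ChamferLossNew()(a, b), ref, rtol=1e-4,
        atol=1e-4)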
|
GitMarco27/GitMarco
|
ChamferLoss
| false | 9,249 |
[
"MIT"
] | 0 |
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
https://github.com/GitMarco27/GitMarco/tree/2d9dd93a73a6d7b68d63222512a646cdd988909e
|
ResidualBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ff/cffhfyg22xbribwqqqct5kdcsfampxjsxm645cabtsik2vspgfca.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_2, sub_3, sub_4, sub_5, sub_6
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + (x4), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/z2/cz2vea43txji7lzwlk7l6pg5ibjqr5r3hzs7kktwbkfojjujlo4g.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index_4, _unsafe_index_5
# Graph fragment:
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_6, [None, None, %sub_8, None]), kwargs = {})
# %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %sub_8]), kwargs = {})
triton_poi_fused_reflection_pad2d_1 = async_compile.triton('triton_poi_fused_reflection_pad2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = (xindex // 10) % 10
x2 = (xindex // 100)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) + ((-8)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (64*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/3v/c3vlx5v63fgc7lrigtb2sx3zteqxmgx4tdcgzit4qmvon4nte2a6.py
# Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_1 => convolution_1
# x_3 => add_7, rsqrt, var_mean
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (64*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(in_out_ptr0 + (r2 + (64*x3)), tmp2, xmask)
tl.store(out_ptr2 + (x3), tmp25, xmask)
tl.store(out_ptr0 + (x3), tmp14, xmask)
tl.store(out_ptr1 + (x3), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/fb/cfbaejhvmtcwdjecom6gqhcwwyyzslbnzo3ydhvuamoutxwzwluv.py
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad_1 => _unsafe_index_6, _unsafe_index_7
# Graph fragment:
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_3, [None, None, %sub_8, None]), kwargs = {})
# %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_6, [None, None, None, %sub_8]), kwargs = {})
triton_poi_fused_reflection_pad2d_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = (xindex // 10) % 10
x2 = (xindex // 100)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) + ((-8)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = 64.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp4 * tmp10
tl.store(out_ptr0 + (x3), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ci/ccict4labfbvxutv7ok7f42nsvg5wzglxatqk6zjp6o6anfzb2o4.py
# Topologically Sorted Source Nodes: [x_4, x_6, add], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# add => add_9
# x_4 => convolution_2
# x_6 => add_8, rsqrt_1, var_mean_1
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_8,), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %view_5), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_4(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (64*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_out_ptr1 + (r2 + (64*x3)), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp22 = tmp4 - tmp14
tmp23 = 64.0
tmp24 = tmp20 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tmp28 = tmp22 * tmp27
tmp29 = tmp21 + tmp28
tl.store(in_out_ptr0 + (r2 + (64*x3)), tmp2, xmask)
tl.store(in_out_ptr1 + (r2 + (64*x3)), tmp29, xmask)
tl.store(out_ptr2 + (x3), tmp27, xmask)
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf2, primals_1, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_s], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 8, 8), (256, 64, 8, 1))
buf4 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_1.run(buf2, buf4, 1600, grid=grid(1600), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1))
buf6 = buf5; del buf5 # reuse
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_2.run(buf6, primals_4, buf7, buf8, buf10, 16, 64, grid=grid(16), stream=stream0)
del primals_4
buf11 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_3.run(buf6, buf7, buf8, buf11, 1600, grid=grid(1600), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 8, 8), (256, 64, 8, 1))
buf13 = buf12; del buf12 # reuse
buf14 = buf8; del buf8 # reuse
buf18 = buf3; del buf3 # reuse
buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_4, x_6, add], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_4.run(buf13, buf18, primals_6, buf14, buf17, 16, 64, grid=grid(16), stream=stream0)
del primals_6
return (buf18, primals_2, primals_3, primals_5, buf2, buf4, buf6, reinterpret_tensor(buf10, (16, ), (1, ), 0), buf11, buf13, reinterpret_tensor(buf17, (16, ), (1, ), 0), reinterpret_tensor(buf14, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, input_channel, output_channel, upsample=True):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(input_channel, output_channel, kernel_size=3,
padding=0)
self.conv2 = nn.Conv2d(output_channel, output_channel, kernel_size=
3, padding=0)
self.conv_shortcut = nn.Conv2d(input_channel, output_channel,
kernel_size=1, bias=False)
self.relu = nn.ReLU()
self.norm_1 = nn.InstanceNorm2d(output_channel)
self.norm_2 = nn.InstanceNorm2d(output_channel)
self.upsample = upsample
self.reflecPad1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.reflecPad2 = nn.ReflectionPad2d((1, 1, 1, 1))
def forward(self, x):
if self.upsample:
x = F.interpolate(x, mode='bilinear', scale_factor=2)
x_s = self.conv_shortcut(x)
x = self.conv1(self.reflecPad1(x))
x = self.relu(x)
x = self.norm_1(x)
x = self.conv2(self.reflecPad2(x))
x = self.relu(x)
x = self.norm_2(x)
return x_s + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channel': 4, 'output_channel': 4}]
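# Hedged shape walk-through (added for illustration): with upsample=True the
# input is bilinearly upsampled 2x, and each ReflectionPad2d((1, 1, 1, 1)) +
# 3x3 conv pair preserves the upsampled spatial size, so the residual add
# against the 1x1 shortcut is shape-compatible.
def _check_residual_shape():
    block = ResidualBlock(input_channel=4, output_channel=4)
    out = block(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 8, 8)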
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
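# The kernel below fuses the 2x bilinear upsample (align_corners=False): per
# output pixel it derives the two clamped source rows and columns plus the
# lerp weights, then writes the interpolated value in-place into in_out_ptr0.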
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = xindex // 10 % 10
x2 = xindex // 100
x3 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
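# The persistent-reduction kernel below adds the conv1 bias in place and
# computes the InstanceNorm statistics: each of the 16 programs owns one
# (batch, channel) plane and reduces the ReLU of its 64 pixels to the mean,
# the sum of squared deviations, and rsqrt(var + eps).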
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask)
tl.store(out_ptr2 + x3, tmp25, xmask)
tl.store(out_ptr0 + x3, tmp14, xmask)
tl.store(out_ptr1 + x3, tmp20, xmask)
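# The pointwise kernel below fuses the second ReflectionPad2d with the tail
# of the first InstanceNorm: it gathers the reflected pixel, re-applies
# ReLU, subtracts the per-plane mean, and scales by rsqrt(sum_sq / 64 + eps).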
@triton.jit
def triton_poi_fused_reflection_pad2d_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = xindex // 10 % 10
x2 = xindex // 100
x3 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x2),
xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = 64.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp4 * tmp10
tl.store(out_ptr0 + x3, tmp11, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_4(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_out_ptr1 + (r2 + 64 * x3), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp22 = tmp4 - tmp14
tmp23 = 64.0
tmp24 = tmp20 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tmp28 = tmp22 * tmp27
tmp29 = tmp21 + tmp28
tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask)
tl.store(in_out_ptr1 + (r2 + 64 * x3), tmp29, xmask)
tl.store(out_ptr2 + x3, tmp27, xmask)
tl.store(out_ptr0 + x3, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf2, primals_1, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 8, 8), (256, 64, 8, 1))
buf4 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.
float32)
triton_poi_fused_reflection_pad2d_1[grid(1600)](buf2, buf4, 1600,
XBLOCK=128, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1))
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_convolution_2[grid(16)](buf6,
primals_4, buf7, buf8, buf10, 16, 64, XBLOCK=8, num_warps=4,
num_stages=1)
del primals_4
buf11 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch
.float32)
triton_poi_fused_reflection_pad2d_3[grid(1600)](buf6, buf7, buf8,
buf11, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 8, 8), (256, 64, 8, 1))
buf13 = buf12
del buf12
buf14 = buf8
del buf8
buf18 = buf3
del buf3
buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_4[grid(16)](
buf13, buf18, primals_6, buf14, buf17, 16, 64, XBLOCK=8,
num_warps=4, num_stages=1)
del primals_6
return (buf18, primals_2, primals_3, primals_5, buf2, buf4, buf6,
reinterpret_tensor(buf10, (16,), (1,), 0), buf11, buf13,
reinterpret_tensor(buf17, (16,), (1,), 0), reinterpret_tensor(buf14,
(1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf7, (1, 16,
1, 1), (16, 1, 1, 1), 0))
class ResidualBlockNew(nn.Module):
def __init__(self, input_channel, output_channel, upsample=True):
super(ResidualBlockNew, self).__init__()
self.conv1 = nn.Conv2d(input_channel, output_channel, kernel_size=3,
padding=0)
self.conv2 = nn.Conv2d(output_channel, output_channel, kernel_size=
3, padding=0)
self.conv_shortcut = nn.Conv2d(input_channel, output_channel,
kernel_size=1, bias=False)
self.relu = nn.ReLU()
self.norm_1 = nn.InstanceNorm2d(output_channel)
self.norm_2 = nn.InstanceNorm2d(output_channel)
self.upsample = upsample
self.reflecPad1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.reflecPad2 = nn.ReflectionPad2d((1, 1, 1, 1))
def forward(self, input_0):
primals_3 = self.conv1.weight
primals_4 = self.conv1.bias
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_2 = self.conv_shortcut.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
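# Hedged parity sketch (added; assumes a CUDA device and the eager
# ResidualBlock reference implementation shown above): the compiled graph
# runs the same upsample / pad / conv / ReLU / InstanceNorm pipeline, so
# copied weights should give matching outputs within float32 tolerance.
def _check_residual_parity():
    eager = ResidualBlock(4, 4).cuda()
    fused = ResidualBlockNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-4)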
|
Holmes-Alan/TxST
|
ResidualBlock
| false | 9,250 |
[
"MIT"
] | 0 |
c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
https://github.com/Holmes-Alan/TxST/tree/c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
BertNonFusedLayerNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/dg/cdgw6x7nju4bzp2wyuwgeanbco7zcjis6yiusovvnpz6zw3yjd3l.py
# Topologically Sorted Source Nodes: [u, s], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# s => sub
# u => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vs/cvsrv62y2zhryf3mg7smlnqpzxf35nl7wxzkoguylniouxrw4scz.py
# Topologically Sorted Source Nodes: [s_1, s_2, add, sqrt, x, mul_1, add_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul_1 => mul_1
# s_1 => mul
# s_2 => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_1 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u, s], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s_1, s_2, add, sqrt, x, mul_1, add_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.sqrt, aten.div]
triton_poi_fused_add_div_mean_mul_sqrt_1.run(primals_2, buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class BertNonFusedLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertNonFusedLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = x - u
s = s * s
s = s.mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
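# Hedged equivalence check (added for illustration): with freshly initialized
# gamma/beta this matches torch.nn.LayerNorm over the last dimension, since
# both use the biased variance and keep eps inside the square root.
def _check_layernorm():
    x = torch.rand(4, 4, 4, 4)
    ln = BertNonFusedLayerNorm(hidden_size=4)
    ref = nn.LayerNorm(4, eps=1e-12)
    assert torch.allclose(ln(x), ref(x), atol=1e-5)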
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
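# Kernel 0 below computes s = x - mean(x, -1) with the 4-element row mean
# unrolled; kernel 1 finishes the TF-style LayerNorm: biased row variance of
# s, division by sqrt(var + 1e-12), then gamma * x_hat + beta.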
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_1[grid(256)](primals_2, buf0,
primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class BertNonFusedLayerNormNew(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertNonFusedLayerNormNew, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
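# Hedged usage sketch (added; assumes a CUDA device): the compiled module
# should reproduce the eager formula elementwise on the shape the guards
# expect, i.e. a contiguous float32 (4, 4, 4, 4) tensor on cuda:0.
def _check_compiled_layernorm():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    m = BertNonFusedLayerNormNew(4).cuda()
    u = x.mean(-1, keepdim=True)
    s = (x - u).pow(2).mean(-1, keepdim=True)
    ref = m.gamma * (x - u) / torch.sqrt(s + 1e-12) + m.beta
    torch.testing.assert_close(m(x), ref)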
|
LiyuanLucasLiu/FasterTransformer
|
BertNonFusedLayerNorm
| false | 9,251 |
[
"Apache-2.0"
] | 0 |
c28149096030286e87491c7648f5a020aed22cc9
|
https://github.com/LiyuanLucasLiu/FasterTransformer/tree/c28149096030286e87491c7648f5a020aed22cc9
|
GumbelSoftmax
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=3] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mn/cmnnoxanfdjzkzhk3u3osn6ajofqa6smeehuxhkdhj6mi524orwm.py
# Topologically Sorted Source Nodes: [prob, y], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob => div, sum_1
# y => div_1
# Graph fragment:
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob, y], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf1
return (buf0, buf2, buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
class GumbelSoftmax(nn.Module):
def __init__(self, f_dim, c_dim):
super(GumbelSoftmax, self).__init__()
self.logits = nn.Linear(f_dim, c_dim)
self.f_dim = f_dim
self.c_dim = c_dim
def sample_gumbel(self, shape, is_cuda=False, eps=1e-20):
U = torch.rand(shape)
        if is_cuda:
            U = U.cuda()  # move the noise onto the GPU to match the logits
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(self, logits, temperature):
y = logits + self.sample_gumbel(logits.size(), logits.is_cuda)
return F.softmax(y / temperature, dim=-1)
def gumbel_softmax(self, logits, temperature, hard=False):
"""
        Straight-through (ST) Gumbel-softmax.
        input: [*, n_class]
        return: flattened [*, n_class] one-hot vectors
"""
y = self.gumbel_softmax_sample(logits, temperature)
if not hard:
return y
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
y_hard = (y_hard - y).detach() + y
return y_hard
def forward(self, x, temperature=1.0, hard=False):
logits = self.logits(x).view(-1, self.c_dim)
prob = F.softmax(logits, dim=-1)
y = F.softmax(logits, dim=-1)
return logits, prob, y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'f_dim': 4, 'c_dim': 4}]
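# Hedged usage sketch (an illustrative addition, not from the source repo):
# gumbel_softmax with hard=True applies the straight-through trick, returning
# exact one-hot rows in the forward pass while gradients flow through the
# soft relaxation y.
gs = GumbelSoftmax(f_dim=4, c_dim=4)
logits, prob, y = gs(torch.rand(4, 4, 4, 4))
hard = gs.gumbel_softmax(logits, temperature=0.5, hard=True)
assert torch.allclose(hard.sum(dim=-1), torch.ones(hard.shape[:-1]))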
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2,
            reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf1
    return (buf0, buf2, buf3,
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf3)
class GumbelSoftmaxNew(nn.Module):
def __init__(self, f_dim, c_dim):
super(GumbelSoftmaxNew, self).__init__()
self.logits = nn.Linear(f_dim, c_dim)
self.f_dim = f_dim
self.c_dim = c_dim
def sample_gumbel(self, shape, is_cuda=False, eps=1e-20):
U = torch.rand(shape)
        if is_cuda:
            U = U.cuda()  # move the noise onto the GPU to match the logits
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(self, logits, temperature):
y = logits + self.sample_gumbel(logits.size(), logits.is_cuda)
return F.softmax(y / temperature, dim=-1)
def gumbel_softmax(self, logits, temperature, hard=False):
"""
        Straight-through (ST) Gumbel-softmax.
        input: [*, n_class]
        return: flattened [*, n_class] one-hot vectors
"""
y = self.gumbel_softmax_sample(logits, temperature)
if not hard:
return y
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
y_hard = (y_hard - y).detach() + y
return y_hard
def forward(self, input_0):
primals_1 = self.logits.weight
primals_2 = self.logits.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1], output[2]
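# Illustrative note (an addition, not from the source repo): because the eager
# forward computes prob and y with the same softmax, Inductor writes both
# outputs from one fused kernel (triton_poi_fused__softmax_1 stores tmp8 to
# two buffers), so the two returned tensors are numerically identical.
if torch.cuda.is_available():
    m = GumbelSoftmaxNew(4, 4).cuda()
    _, prob, y = m(torch.rand(4, 4, 4, 4, device='cuda'))
    assert torch.equal(prob, y)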
|
Kaya176/GMVAE
|
GumbelSoftmax
| false | 9,252 |
[
"MIT"
] | 0 |
6369be52dbac796e2f836f51b16aaa5c61247350
|
https://github.com/Kaya176/GMVAE/tree/6369be52dbac796e2f836f51b16aaa5c61247350
|
CodeLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/r2/cr2hnuomfztbbksk2upcuhfke7uh2suy435c4sbwlzcpxe46mh4c.py
# Topologically Sorted Source Nodes: [ne, float_1, sum_1, code_similar], Original ATen: [aten.ne, aten._to_copy, aten.sum, aten.mean]
# Source node to ATen node mapping:
# code_similar => mean
# float_1 => convert_element_type
# ne => ne
# sum_1 => sum_1
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused__to_copy_mean_ne_sum_0 = async_compile.triton('triton_per_fused__to_copy_mean_ne_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_mean_ne_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_mean_ne_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tmp6 = tmp4 != tmp5
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 != tmp10
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 != tmp15
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp13 + tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/oh/coh2yeroil7763huhxa6jpwieogggy6f3b7l7xj3m5iim2nz3zds.py
# Topologically Sorted Source Nodes: [ne, float_1, sum_1, code_similar, mse_loss, mse_loss_1, add, feature_similar, mul, add_1], Original ATen: [aten.ne, aten._to_copy, aten.sum, aten.mean, aten.mse_loss, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# code_similar => mean
# feature_similar => div
# float_1 => convert_element_type
# mse_loss => mean_1, pow_1, sub
# mse_loss_1 => mean_2, pow_2, sub_1
# mul => mul
# ne => ne
# sum_1 => sum_1
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg1_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, %mean_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 0.001), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mul), kwargs = {})
triton_per_fused__to_copy_add_div_mean_mse_loss_mul_ne_sum_1 = async_compile.triton('triton_per_fused__to_copy_add_div_mean_mse_loss_mul_ne_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_mean_mse_loss_mul_ne_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_mean_mse_loss_mul_ne_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp7 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp14 = tl.load(in_out_ptr0 + (0))
tmp15 = tl.broadcast_to(tmp14, [1])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp16 = 64.0
tmp17 = tmp15 / tmp16
tmp18 = 256.0
tmp19 = tmp6 / tmp18
tmp20 = tmp13 / tmp18
tmp21 = tmp19 + tmp20
tmp22 = 0.5
tmp23 = tmp21 * tmp22
tmp24 = 0.001
tmp25 = tmp23 * tmp24
tmp26 = tmp17 + tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [ne, float_1, sum_1, code_similar], Original ATen: [aten.ne, aten._to_copy, aten.sum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_mean_ne_sum_0.run(arg0_1, arg1_1, buf0, 1, 64, grid=grid(1), stream=stream0)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [ne, float_1, sum_1, code_similar, mse_loss, mse_loss_1, add, feature_similar, mul, add_1], Original ATen: [aten.ne, aten._to_copy, aten.sum, aten.mean, aten.mse_loss, aten.add, aten.div, aten.mul]
triton_per_fused__to_copy_add_div_mean_mse_loss_mul_ne_sum_1.run(buf3, arg2_1, arg0_1, arg3_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class CodeLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss()
def forward(self, origin_code, trans_code, origin_feature,
trans_feature, weight=0.001):
        code_similar = torch.mean(
            torch.sum((origin_code != trans_code).float(), dim=1))
        feature_similar = (self.loss(origin_feature, origin_code) +
            self.loss(trans_feature, trans_code)) / 2
return code_similar + weight * feature_similar
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
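# Minimal eager re-derivation (an illustrative addition): code_similar counts
# mismatched code entries per sample along dim=1 and averages the counts,
# while feature_similar averages two MSE terms; the small weight keeps the
# feature term from dominating the mismatch count.
loss_fn = CodeLoss()
a, b, fa, fb = (torch.rand(4, 4, 4, 4) for _ in range(4))
manual = (a != b).float().sum(dim=1).mean() + 0.001 * (
    torch.nn.functional.mse_loss(fa, a) +
    torch.nn.functional.mse_loss(fb, b)) / 2
assert torch.allclose(loss_fn(a, b, fa, fb), manual)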
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_mean_ne_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tmp6 = tmp4 != tmp5
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 != tmp10
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 != tmp15
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp13 + tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
@triton.jit
def triton_per_fused__to_copy_add_div_mean_mse_loss_mul_ne_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp7 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp14 = tl.load(in_out_ptr0 + 0)
tmp15 = tl.broadcast_to(tmp14, [1])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp16 = 64.0
tmp17 = tmp15 / tmp16
tmp18 = 256.0
tmp19 = tmp6 / tmp18
tmp20 = tmp13 / tmp18
tmp21 = tmp19 + tmp20
tmp22 = 0.5
tmp23 = tmp21 * tmp22
tmp24 = 0.001
tmp25 = tmp23 * tmp24
tmp26 = tmp17 + tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_mean_ne_sum_0[grid(1)](arg0_1, arg1_1,
buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = buf0
del buf0
triton_per_fused__to_copy_add_div_mean_mse_loss_mul_ne_sum_1[grid(1)](
buf3, arg2_1, arg0_1, arg3_1, arg1_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf3,
class CodeLossNew(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.MSELoss()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
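# Hedged equivalence check (an addition; needs CUDA since call() launches the
# fused kernels on device 0): with the default weight 0.001 baked into the
# second kernel, the compiled loss should match the eager formula.
if torch.cuda.is_available():
    a, b, fa, fb = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(4))
    eager = (a != b).float().sum(dim=1).mean() + 0.001 * (
        nn.functional.mse_loss(fa, a) + nn.functional.mse_loss(fb, b)) / 2
    assert torch.allclose(CodeLossNew()(a, b, fa, fb), eager, atol=1e-6)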
|
KMU-AELAB/DeepHashing
|
CodeLoss
| false | 9,253 |
[
"MIT"
] | 0 |
c60069884778246c5a6e11161b78af69e5c8c176
|
https://github.com/KMU-AELAB/DeepHashing/tree/c60069884778246c5a6e11161b78af69e5c8c176
|
VertexDirectEmbedder
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/xq/cxqinuparlha25j4geyv6tolvpah7qdqdkpecjesyn3kblysszql.py
# Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
# Source node to ATen node mapping:
# clamp => clamp_min
# norm => pow_1, pow_2, sum_1
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import pickle
import torch
import torch.utils.data
from torch import nn
from detectron2.utils.file_io import PathManager  # assumed source of PathManager used by load(); adjust if yours differs
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
    return embeddings / torch.clamp(
        embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon)
class VertexDirectEmbedder(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedder, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
def forward(self) ->torch.Tensor:
"""
Produce vertex embeddings, a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Return:
Full vertex embeddings, a tensor of shape [N, D]
"""
return normalize_embeddings(self.embeddings)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def get_inputs():
return []
def get_init_inputs():
return [[], {'num_vertices': 4, 'embed_dim': 4}]
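# Hedged usage sketch (an illustrative addition): after normalization every
# row has unit L2 norm, except rows whose norm fell below epsilon, which are
# only rescaled by 1/epsilon; with uniform(-0.5, 0.5) init that is
# vanishingly unlikely.
emb = VertexDirectEmbedder(num_vertices=4, embed_dim=4)
assert torch.allclose(emb().norm(dim=1), torch.ones(4), atol=1e-5)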
|
import pickle
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
from detectron2.utils.file_io import PathManager  # assumed source of PathManager used by load(); adjust if yours differs
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(16)](primals_1,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf0, primals_1
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
    return embeddings / torch.clamp(
        embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon)
class VertexDirectEmbedderNew(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedderNew, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def forward(self):
primals_1 = self.embeddings
output = call([primals_1])
return output[0]
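# Optional sanity check (an addition; requires CUDA because call() runs the
# fused kernel on device 0): the compiled forward should match the eager
# normalize_embeddings defined above.
if torch.cuda.is_available():
    emb = VertexDirectEmbedderNew(4, 4).cuda()
    assert torch.allclose(emb(), normalize_embeddings(emb.embeddings),
        atol=1e-6)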
|
Lele-Zhou/detectron2-based
|
VertexDirectEmbedder
| false | 9,254 |
[
"Apache-2.0"
] | 0 |
a6f65174c6f11918c8e7600746f9f87baa89ecc0
|
https://github.com/Lele-Zhou/detectron2-based/tree/a6f65174c6f11918c8e7600746f9f87baa89ecc0
|
Rot180
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/yk/cykfxtpmz573b3mpfj7bxbmd2qw4pqlxedh5nnwa22xvl6sb22dj.py
# Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip]
# Source node to ATen node mapping:
# flip => rev
# Graph fragment:
# %rev : [num_users=1] = call_function[target=torch.ops.prims.rev.default](args = (%arg0_1, [2, 3]), kwargs = {})
triton_poi_fused_flip_0 = async_compile.triton('triton_poi_fused_flip_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_flip_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip]
stream0 = get_raw_stream(0)
triton_poi_fused_flip_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def rot180(input: 'torch.Tensor') ->torch.Tensor:
"""Rotate a tensor image or a batch of tensor images
180 degrees. Input must be a tensor of shape (C, H, W)
or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The rotated image tensor
"""
return torch.flip(input, [-2, -1])
class Rot180(nn.Module):
"""Rotate a tensor image or a batch of tensor images
180 degrees. Input must be a tensor of shape (C, H, W)
or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.rot180(input)
tensor([[[1, 1, 0],
[0, 0, 0],
[0, 0, 0]]])
"""
def __init__(self) ->None:
super(Rot180, self).__init__()
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return rot180(input)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
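# Illustrative equivalence (an addition, not from kornia): flipping the last
# two axes is the same as two successive 90-degree rotations in that plane.
x = torch.rand(4, 4, 4, 4)
assert torch.equal(rot180(x), torch.rot90(x, k=2, dims=(-2, -1)))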
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * x0 + 16 * x1), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_flip_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def rot180(input: 'torch.Tensor') ->torch.Tensor:
"""Rotate a tensor image or a batch of tensor images
180 degrees. Input must be a tensor of shape (C, H, W)
or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The rotated image tensor
"""
return torch.flip(input, [-2, -1])
class Rot180New(nn.Module):
"""Rotate a tensor image or a batch of tensor images
180 degrees. Input must be a tensor of shape (C, H, W)
or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.rot180(input)
tensor([[[1, 1, 0],
[0, 0, 0],
[0, 0, 0]]])
"""
def __init__(self) ->None:
super(Rot180New, self).__init__()
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
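# Optional sanity check (an addition; needs CUDA since call() asserts device
# strides): the compiled flip kernel should reproduce the eager rot180.
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.equal(Rot180New()(x), rot180(x))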
|
IEM-Computer-Vision/kornia
|
Rot180
| false | 9,255 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
ResidualAttentionBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# ret => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# ret => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ah/cahpqo3o7hv3q647n5lretlqvfljlubj4ic7gscxws4yvkm5jzff.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_9, 1.0), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/id/cidhx2qv2kci37tvy6z4ewaqfmq7whhaejd6kgfwwkpiinbays7z.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_3
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_6, 1.0), kwargs = {})
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5l/c5lr3fbju3rzhzjnfdhv5evmxwg6sfeq26etlwygdbmjwzziqmty.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_14, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_14, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__safe_softmax_4 = async_compile.triton('triton_poi_fused__safe_softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__safe_softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__safe_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ue/cue46mvjwzenos4q7lzsgaczgcooiukadngwwlhdoblv6ugei3uv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => any_1, div, eq, full_default, logical_not, logical_not_1, sum_1, where
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_14, -inf), kwargs = {})
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq,), kwargs = {})
# %any_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not, -1, True), kwargs = {})
# %logical_not_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_1,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_1, %full_default, %div), kwargs = {})
triton_poi_fused__safe_softmax_5 = async_compile.triton('triton_poi_fused__safe_softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__safe_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__safe_softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bu/cbuwmu2mfp5lph35cxxxg6q2iiyw5l7wwbgjvpc2uau2jqndueez.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vd/cvdxk4eaksba3zsprpzfb3vx7t7nnrtdy3faz6dzcvumwkab3o3e.py
# Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# ret_1 => var_mean_1
# x => add_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ee/ceevx2e7wi6j4h2hf3scvp3at64mcchaq7w4yqwjgj6s77ewvx2h.py
# Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# ret_1 => add_3, add_4, mul_4, mul_5, rsqrt_1, sub_2
# x => add_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_9), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_8), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/s4/cs4y6xlhpgdaidbv4fzi42ren6zhpqqveelpjlrjwzf7b52imcf5.py
# Topologically Sorted Source Nodes: [mul, sigmoid, input_2], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# input_2 => mul_7
# mul => mul_6
# sigmoid => sigmoid
# Graph fragment:
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, 1.702), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_6,), kwargs = {})
# %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_9 = async_compile.triton('triton_poi_fused_mul_sigmoid_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/g3/cg3qezdtknb7gkpzhrsk3kwygoxnfplxs7kvhkhixedr43ssvdqx.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add_2
# x_1 => add_5
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 1, 4, 16), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf4, (1, 4, 1, 4), (16, 1, 16, 4), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(buf7, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 0), 0), reinterpret_tensor(buf7, (4, 1, 4), (1, 0, 4), 0), out=buf8)
buf9 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
triton_poi_fused__safe_softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
triton_poi_fused__safe_softmax_5.run(buf8, buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf11, buf12, 4, 4, grid=grid(4, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_7
buf14 = buf1; del buf1 # reuse
buf15 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf13, buf14, buf15, 4, grid=grid(4), stream=stream0)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_1, buf13, buf14, buf15, primals_8, primals_9, buf16, 16, grid=grid(16), stream=stream0)
del buf14
del buf15
del primals_9
buf17 = reinterpret_tensor(buf9, (4, 16), (16, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf16, reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = reinterpret_tensor(buf8, (4, 16), (16, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [mul, sigmoid, input_2], Original ATen: [aten.mul, aten.sigmoid]
triton_poi_fused_mul_sigmoid_9.run(buf17, buf18, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf18, reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf19)
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf20, primals_1, buf13, primals_13, 16, grid=grid(16), stream=stream0)
del primals_13
return (buf20, primals_1, primals_8, buf2, buf10, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf13, buf16, buf17, buf18, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 4, 4), 0), reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 16), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
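# Running this file directly invokes compiled_module_main, which calls
# benchmark_compiled_module above: call() is timed for `times` iterations
# across `repeat` repeats and the latency is reported via print_performance.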
|
import torch
import torch.nn as nn
from collections import OrderedDict
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: 'torch.Tensor'):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: 'torch.Tensor'):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: 'int', n_head: 'int', attn_mask:
'torch.Tensor'=None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model,
d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear(
d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: 'torch.Tensor'):
        # attn_mask is optional; passing None leaves attention unmasked
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: 'torch.Tensor'):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'n_head': 4}]
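if __name__ == '__main__':
    # A minimal usage sketch, not part of the original module: shapes follow
    # get_inputs()/get_init_inputs() above, and it assumes a PyTorch version
    # with unbatched nn.MultiheadAttention support (>= 1.11).
    block = ResidualAttentionBlock(d_model=4, n_head=4)
    x = torch.rand(4, 4)  # unbatched (seq_len, d_model) input
    y = block(x)          # attention residual followed by MLP residual
    assert y.shape == x.shape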
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
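# LayerNorm statistics for ln_1: for each of the 4 rows of the (4, 4) input,
# compute the mean and rsqrt(variance + 1e-05) over the last dimension
# (the size-4 reduction is fully unrolled).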
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
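# LayerNorm transform for ln_1: normalize each element with the precomputed
# per-row mean/rsqrt, then apply the elementwise affine weight and bias.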
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
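# Query path of multi-head attention: add the query slice of in_proj_bias,
# then apply the attention scale 1/sqrt(head_dim), which folds to 1.0 here
# since head_dim = d_model / n_head = 1.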
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
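# Key path: same as the previous kernel, but reading the key slice of
# in_proj_bias (offset 4).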
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
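# Numerically stable softmax numerator: subtract the per-row max from the
# attention logits and exponentiate.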
@triton.jit
def triton_poi_fused__safe_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
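# Finish _safe_softmax: divide each exp value by its row sum, and emit zeros
# for rows whose logits are all -inf (fully masked rows) instead of NaNs.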
@triton.jit
def triton_poi_fused__safe_softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
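# Contiguous copy of the permuted attention output: a 4x4 transpose that
# materializes clone(permute(...)) for the following addmm.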
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
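# Fused residual add + LayerNorm statistics for ln_2: per row of
# x + attn_out, compute the mean and the population variance (rsqrt is
# applied in the next kernel).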
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
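# LayerNorm transform for ln_2: recompute rsqrt(var + 1e-05), normalize the
# residual sum, and apply the affine weight and bias.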
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
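# QuickGELU activation: x * sigmoid(1.702 * x).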
@triton.jit
def triton_poi_fused_mul_sigmoid_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
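# Final fused residual: out = (x + attn_out) + (mlp_mm + c_proj_bias),
# written in place over the c_proj matmul result.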
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 16), out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf4, (1, 4, 1, 4), (16, 1, 16, 4), 0)
del buf4
triton_poi_fused_mul_3[grid(16)](buf7, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 0), 0
), reinterpret_tensor(buf7, (4, 1, 4), (1, 0, 4), 0), out=buf8)
buf9 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__safe_softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__safe_softmax_5[grid(64)](buf8, buf9, buf10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32)
triton_poi_fused_clone_6[grid(4, 4)](buf11, buf12, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_7
buf14 = buf1
del buf1
buf15 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(4)](primals_1, buf13,
buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_1, buf13,
buf14, buf15, primals_8, primals_9, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf14
del buf15
del primals_9
buf17 = reinterpret_tensor(buf9, (4, 16), (16, 1), 0)
del buf9
extern_kernels.addmm(primals_11, buf16, reinterpret_tensor(
primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = reinterpret_tensor(buf8, (4, 16), (16, 1), 0)
del buf8
triton_poi_fused_mul_sigmoid_9[grid(64)](buf17, buf18, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf18, reinterpret_tensor(primals_12, (16, 4), (1,
16), 0), out=buf19)
buf20 = buf19
del buf19
triton_poi_fused_add_10[grid(16)](buf20, primals_1, buf13,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf20, primals_1, primals_8, buf2, buf10, reinterpret_tensor(
buf12, (4, 4), (4, 1), 0), buf13, buf16, buf17, buf18, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 4, 4), 0),
reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 16), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: 'torch.Tensor'):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: 'torch.Tensor'):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlockNew(nn.Module):
def __init__(self, d_model: 'int', n_head: 'int', attn_mask:
'torch.Tensor'=None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model,
d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear(
d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: 'torch.Tensor'):
        # attn_mask is optional; passing None leaves attention unmasked
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, input_0):
primals_4 = self.attn.in_proj_weight
primals_5 = self.attn.in_proj_bias
primals_1 = self.attn.out_proj.weight
primals_2 = self.attn.out_proj.bias
primals_3 = self.ln_1.weight
primals_7 = self.ln_1.bias
primals_10 = self.mlp.c_fc.weight
primals_11 = self.mlp.c_fc.bias
primals_12 = self.mlp.c_proj.weight
primals_8 = self.mlp.c_proj.bias
primals_9 = self.ln_2.weight
primals_13 = self.ln_2.bias
primals_6 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
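if __name__ == '__main__':
    # A minimal smoke test, not part of the original module (an assumption:
    # a CUDA device is available, since call() pins all buffers to cuda:0).
    # It only checks shapes; numerical parity with the eager block is not
    # asserted here.
    block = ResidualAttentionBlockNew(d_model=4, n_head=4).cuda()
    x = torch.rand(4, 4, device='cuda')
    y = block(x)  # dispatches through the Inductor call() graph above
    assert y.shape == (4, 4)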
|
Holmes-Alan/TxST
|
ResidualAttentionBlock
| false | 9,256 |
[
"MIT"
] | 0 |
c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
https://github.com/Holmes-Alan/TxST/tree/c5b59a12bbb9e62244c3b608581d5cb9606525e0
|
L2Norm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/h6/ch6lbegsbikmeuedhuqntueffmboz2ejtnlwrp3sosxr5gscbhtj.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# norm => add
# out => mul
# pow_1 => pow_1
# sqrt => sqrt
# sum_1 => sum_1
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = 1e-10
tmp15 = tmp13 + tmp14
tmp16 = tmp1 / tmp15
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = torch.div(x, norm)
out = self.weight.view(1, -1, 1, 1) * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4, 'scale': 1.0}]
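# Hypothetical usage sketch (not part of the upstream module): with
# scale == 1.0 the layer should leave each spatial location with unit L2 norm
# across the channel dimension, up to the 1e-10 epsilon.
def _l2norm_demo():
    layer = L2Norm(n_channels=4, scale=1.0)
    x = torch.rand(4, 4, 4, 4)
    y = layer(x)
    return y.pow(2).sum(dim=1).sqrt()  # approximately all ones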
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = 1e-10
tmp15 = tmp13 + tmp14
tmp16 = tmp1 / tmp15
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
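# Hedged sanity check (illustrative only; assumes a CUDA device is available):
# the fused Triton kernel should reproduce the eager L2Norm forward pass it
# was traced from.
def _check_l2norm_equivalence():
    m = L2NormNew(4, 1.0).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + 1e-10
    eager = m.weight.view(1, -1, 1, 1) * (x / norm)
    assert torch.allclose(m(x), eager, atol=1e-6)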
|
LucasVandroux/ssd.pytorch
|
L2Norm
| false | 9,257 |
[
"MIT"
] | 0 |
d4471f6cfe2aa003ba5d7d9d9ab4d78936bb3f02
|
https://github.com/LucasVandroux/ssd.pytorch/tree/d4471f6cfe2aa003ba5d7d9d9ab4d78936bb3f02
|
Hflip
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/mw/cmwuhpoo35erq3s5jprdn2bal2k4sfcwtsv6hgm3szdtd6g2t2ew.py
# Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip]
# Source node to ATen node mapping:
# flip => rev
# Graph fragment:
# %rev : [num_users=1] = call_function[target=torch.ops.prims.rev.default](args = (%arg0_1, [3]), kwargs = {})
triton_poi_fused_flip_0 = async_compile.triton('triton_poi_fused_flip_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_flip_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (3 + ((-1)*x0) + (4*x1)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip]
stream0 = get_raw_stream(0)
triton_poi_fused_flip_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def hflip(input: 'torch.Tensor') ->torch.Tensor:
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
"""
return torch.flip(input, [-1])
class Hflip(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.hflip(input)
tensor([[[0, 0, 0],
[0, 0, 0],
[1, 1, 0]]])
"""
def __init__(self) ->None:
super(Hflip, self).__init__()
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return hflip(input)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
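# Illustrative example (assumes only the module above): hflip reverses the
# last (width) dimension, so the row [0., 1., 2.] becomes [2., 1., 0.].
def _hflip_demo():
    x = torch.arange(9.0).reshape(1, 1, 3, 3)
    return hflip(x)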
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (3 + -1 * x0 + 4 * x1), xmask, eviction_policy
='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_flip_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def hflip(input: 'torch.Tensor') ->torch.Tensor:
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
"""
return torch.flip(input, [-1])
class HflipNew(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.hflip(input)
tensor([[[0, 0, 0],
[0, 0, 0],
[1, 1, 0]]])
"""
def __init__(self) ->None:
super(HflipNew, self).__init__()
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
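# Reading the kernel above: for a contiguous tensor whose last axis has
# length 4, the load index `3 + -1 * x0 + 4 * x1` fetches the mirrored column
# (3 - x0) of row x1, which is exactly torch.flip along the last dimension.
# A hypothetical pure-Python restatement of that index map:
def _mirror_index(x0, x1, width=4):
    return (width - 1) - x0 + width * x1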
|
IEM-Computer-Vision/kornia
|
Hflip
| false | 9,258 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
RgbaToBgr
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/af/caf7neijm3kbkzae765spedgco2ojidwgdae5r4fritcjagme5n2.py
# Topologically Sorted Source Nodes: [x_rgb, out], Original ATen: [aten.cat, aten.flip]
# Source node to ATen node mapping:
# out => rev
# x_rgb => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1, %getitem_2], -3), kwargs = {})
# %rev : [num_users=1] = call_function[target=torch.ops.prims.rev.default](args = (%cat, [1]), kwargs = {})
triton_poi_fused_cat_flip_0 = async_compile.triton('triton_poi_fused_cat_flip_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_flip_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 3
x0 = xindex % 16
x2 = (xindex // 48)
x3 = xindex
tmp0 = 2 + ((-1)*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_rgb, out], Original ATen: [aten.cat, aten.flip]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_flip_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def bgr_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a BGR image to RGB.
See :class:`~kornia.color.BgrToRgb` for details.
Args:
image (torch.Tensor): BGR Image to be converted to RGB.
Returns:
torch.Tensor: RGB version of the image.
"""
if not torch.is_tensor(image):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
        raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
            .format(image.shape))
out: 'torch.Tensor' = image.flip(-3)
return out
def rgb_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a RGB image to BGR.
See :class:`~kornia.color.RgbToBgr` for details.
Args:
image (torch.Tensor): RGB Image to be converted to BGR.
Returns:
torch.Tensor: BGR version of the image.
"""
return bgr_to_rgb(image)
def rgba_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert image from RGBA to RGB.
See :class:`~kornia.color.RgbaToRgb` for details.
Args:
image (torch.Tensor): RGBA Image to be converted to RGB.
Returns:
torch.Tensor: RGB version of the image.
"""
if not torch.is_tensor(image):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)
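    # Note: since a_one + a == 1, each of the three products below reduces to
    # the original channel, and their results are discarded; the lines are
    # no-ops preserved verbatim from the upstream source.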
a_one = torch.tensor(1.0) - a
a_one * r + a * r
a_one * g + a * g
a_one * b + a * b
return torch.cat([r, g, b], dim=-3)
def rgba_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert image from RGBA to BGR.
See :class:`~kornia.color.RgbaToBgr` for details.
Args:
image (torch.Tensor): RGBA Image to be converted to BGR.
Returns:
torch.Tensor: BGR version of the image.
"""
if not torch.is_tensor(image):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
x_rgb: 'torch.Tensor' = rgba_to_rgb(image)
return rgb_to_bgr(x_rgb)
class RgbaToBgr(nn.Module):
"""Convert image from RGBA to BGR.
Remove an alpha channel from BGR image.
returns:
torch.Tensor: BGR version of the image.
shape:
- image: :math:`(*, 4, H, W)`
- output: :math:`(*, 3, H, W)`
Examples::
>>> input = torch.rand(2, 4, 4, 5)
>>> rgba = kornia.color.RgbaToBgr()
>>> output = rgba(input) # 2x3x4x5
"""
def __init__(self) ->None:
super(RgbaToBgr, self).__init__()
def forward(self, image: 'torch.Tensor') ->torch.Tensor:
return rgba_to_bgr(image)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
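# Illustrative sketch: with distinct constant channels, the alpha channel is
# dropped and the remaining channels come back in (b, g, r) order.
def _rgba_to_bgr_demo():
    rgba = torch.stack([torch.full((4, 4), v) for v in (1.0, 2.0, 3.0, 4.0)]).unsqueeze(0)
    return rgba_to_bgr(rgba)  # channel values become [3., 2., 1.]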
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x0 = xindex % 16
x2 = xindex // 48
x3 = xindex
tmp0 = 2 + -1 * x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 3, tl.int64)
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_flip_0[grid(192)](arg0_1, buf0, 192, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def bgr_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a BGR image to RGB.
See :class:`~kornia.color.BgrToRgb` for details.
Args:
image (torch.Tensor): BGR Image to be converted to RGB.
Returns:
torch.Tensor: RGB version of the image.
"""
if not torch.is_tensor(image):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
        raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
            .format(image.shape))
out: 'torch.Tensor' = image.flip(-3)
return out
def rgb_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a RGB image to BGR.
See :class:`~kornia.color.RgbToBgr` for details.
Args:
image (torch.Tensor): RGB Image to be converted to BGR.
Returns:
torch.Tensor: BGR version of the image.
"""
return bgr_to_rgb(image)
def rgba_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert image from RGBA to RGB.
See :class:`~kornia.color.RgbaToRgb` for details.
Args:
image (torch.Tensor): RGBA Image to be converted to RGB.
Returns:
torch.Tensor: RGB version of the image.
"""
if not torch.is_tensor(image):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)
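    # Note: since a_one + a == 1, each of the three products below reduces to
    # the original channel, and their results are discarded; the lines are
    # no-ops preserved verbatim from the upstream source.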
a_one = torch.tensor(1.0) - a
a_one * r + a * r
a_one * g + a * g
a_one * b + a * b
return torch.cat([r, g, b], dim=-3)
def rgba_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert image from RGBA to BGR.
See :class:`~kornia.color.RgbaToBgr` for details.
Args:
image (torch.Tensor): RGBA Image to be converted to BGR.
Returns:
torch.Tensor: BGR version of the image.
"""
if not torch.is_tensor(image):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
x_rgb: 'torch.Tensor' = rgba_to_rgb(image)
return rgb_to_bgr(x_rgb)
class RgbaToBgrNew(nn.Module):
"""Convert image from RGBA to BGR.
Remove an alpha channel from BGR image.
returns:
torch.Tensor: BGR version of the image.
shape:
- image: :math:`(*, 4, H, W)`
- output: :math:`(*, 3, H, W)`
Examples::
>>> input = torch.rand(2, 4, 4, 5)
>>> rgba = kornia.color.RgbaToBgr()
>>> output = rgba(input) # 2x3x4x5
"""
def __init__(self) ->None:
super(RgbaToBgrNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
IEM-Computer-Vision/kornia
|
RgbaToBgr
| false | 9,259 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
InvDepth
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/w7/cw7ogushagw5rd6eixmm5u3erviyy7hf33356b3glzcg42jbcwxp.py
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
# Source node to ATen node mapping:
# clamp => clamp_max, clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%primals_1, 0.04), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2.0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%primals_1, 0.04), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%primals_1, 2.0), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {})
triton_poi_fused_clamp_ge_le_logical_and_0 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.04
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
del primals_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class InvDepth(nn.Module):
def __init__(self, height, width, min_depth=0.5, max_depth=25.0):
super(InvDepth, self).__init__()
self._min_range = 1.0 / max_depth
self._max_range = 1.0 / min_depth
self.w = nn.Parameter(self._init_weights(height, width))
def _init_weights(self, height, width):
r1 = self._min_range
r2 = self._min_range + (self._max_range - self._min_range) * 0.1
w_init = (r1 - r2) * torch.rand(1, 1, height, width) + r2
return w_init
def forward(self):
return self.w.clamp(min=self._min_range, max=self._max_range)
def get_inputs():
return []
def get_init_inputs():
return [[], {'height': 4, 'width': 4}]
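# With the configuration above (min_depth=0.5, max_depth=25.0 by default),
# the clamp bounds are 1/25.0 = 0.04 and 1/0.5 = 2.0, which are exactly the
# literals baked into the compiled kernel for this trace.
def _invdepth_bounds(min_depth=0.5, max_depth=25.0):
    return 1.0 / max_depth, 1.0 / min_depth  # (0.04, 2.0)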
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.04
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0[grid(16)](primals_1,
buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf0, buf1
class InvDepthNew(nn.Module):
def __init__(self, height, width, min_depth=0.5, max_depth=25.0):
super(InvDepthNew, self).__init__()
self._min_range = 1.0 / max_depth
self._max_range = 1.0 / min_depth
self.w = nn.Parameter(self._init_weights(height, width))
def _init_weights(self, height, width):
r1 = self._min_range
r2 = self._min_range + (self._max_range - self._min_range) * 0.1
w_init = (r1 - r2) * torch.rand(1, 1, height, width) + r2
return w_init
def forward(self):
primals_1 = self.w
output = call([primals_1])
return output[0]
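# Aside: call() returns both the clamped weights (buf0) and a boolean mask
# (buf1) of in-range elements; since this is an AOT forward graph, the mask
# appears to be the saved tensor the clamp backward uses to zero gradients
# outside [0.04, 2.0].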
|
IEM-Computer-Vision/kornia
|
InvDepth
| false | 9,260 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
PSNRLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/bz/cbz4jfqnivmqo3kagpgcw2e6ojdrfmyv6p6tqz7c2t57rwabulea.py
# Topologically Sorted Source Nodes: [mul, mse_val, truediv, log10, mul_1], Original ATen: [aten.mul, aten.mse_loss, aten.div, aten.log10]
# Source node to ATen node mapping:
# log10 => log10
# mse_val => mean, pow_1, sub
# mul => full_default_1
# mul_1 => mul_1
# truediv => div
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 16.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%full_default_1, %mean), kwargs = {})
# %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%div,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, 10), kwargs = {})
triton_per_fused_div_log10_mse_loss_mul_0 = async_compile.triton('triton_per_fused_div_log10_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_log10_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 16.0
tmp10 = tmp9 / tmp8
tmp11 = libdevice.log10(tmp10)
tmp12 = 10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, mse_val, truediv, log10, mul_1], Original ATen: [aten.mul, aten.mse_loss, aten.div, aten.log10]
stream0 = get_raw_stream(0)
triton_per_fused_div_log10_mse_loss_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float'
) ->torch.Tensor:
"""Function that computes PSNR
See :class:`~kornia.losses.PSNR` for details.
"""
if not torch.is_tensor(input) or not torch.is_tensor(target):
raise TypeError(
f'Expected 2 torch tensors but got {type(input)} and {type(target)}'
)
if input.shape != target.shape:
raise TypeError(
f'Expected tensors of equal shapes, but got {input.shape} and {target.shape}'
)
mse_val = mse_loss(input, target, reduction='mean')
max_val_tensor: 'torch.Tensor' = torch.tensor(max_val).to(input.device).to(
input.dtype)
return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)
class PSNRLoss(nn.Module):
"""Creates a criterion that calculates the PSNR between 2 images. Given an m x n image,
.. math::
\\text{MSE}(I,T) = \\frac{1}{m\\,n}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
Arguments:
max_val (float): Maximum value of input
Shape:
- input: :math:`(*)`
- approximation: :math:`(*)` same shape as input
- output: :math:`()` a scalar
Examples:
>>> kornia.losses.psnr(torch.ones(1), 1.2*torch.ones(1), 2)
tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)
reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
def __init__(self, max_val: 'float') ->None:
super(PSNRLoss, self).__init__()
self.max_val: 'float' = max_val
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
return psnr_loss(input, target, self.max_val)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'max_val': 4}]
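# Worked example matching the docstring above: for inputs torch.ones(1) and
# 1.2 * torch.ones(1) with max_val = 2, the MSE is 0.2 ** 2 = 0.04, so
# PSNR = 10 * log10(2 ** 2 / 0.04) = 10 * log10(100) = 20.
def _psnr_example():
    return psnr_loss(torch.ones(1), 1.2 * torch.ones(1), 2.0)  # tensor(20.)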
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.functional import mse_loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 16.0
tmp10 = tmp9 / tmp8
tmp11 = libdevice.log10(tmp10)
tmp12 = 10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_log10_mse_loss_mul_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float'
) ->torch.Tensor:
"""Function that computes PSNR
See :class:`~kornia.losses.PSNR` for details.
"""
if not torch.is_tensor(input) or not torch.is_tensor(target):
raise TypeError(
f'Expected 2 torch tensors but got {type(input)} and {type(target)}'
)
if input.shape != target.shape:
raise TypeError(
f'Expected tensors of equal shapes, but got {input.shape} and {target.shape}'
)
mse_val = mse_loss(input, target, reduction='mean')
max_val_tensor: 'torch.Tensor' = torch.tensor(max_val).to(input.device).to(
input.dtype)
return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)
class PSNRLossNew(nn.Module):
"""Creates a criterion that calculates the PSNR between 2 images. Given an m x n image,
.. math::
\\text{MSE}(I,T) = \\frac{1}{m\\,n}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
Arguments:
max_val (float): Maximum value of input
Shape:
- input: :math:`(*)`
- approximation: :math:`(*)` same shape as input
- output: :math:`()` a scalar
Examples:
>>> kornia.losses.psnr(torch.ones(1), 1.2*torch.ones(1), 2)
tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)
reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
def __init__(self, max_val: 'float') ->None:
super(PSNRLossNew, self).__init__()
self.max_val: 'float' = max_val
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
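# Note on the fused kernel above: the literals 256.0 and 16.0 are shape and
# argument specializations from tracing; 256 is the element count of a
# (4, 4, 4, 4) input and 16.0 is max_val ** 2 for the max_val = 4 used in
# get_init_inputs(). Other shapes or max_val values would compile differently.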
|
IEM-Computer-Vision/kornia
|
PSNRLoss
| false | 9,261 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
TotalVariation
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/3o/c3oy2cuxgl6yk5hywykwsghjl7htx5ny24ujxp7mydfu6cw4hmel.py
# Topologically Sorted Source Nodes: [pixel_dif1, abs_1, sum_1, pixel_dif2, abs_2, sum_2, add], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# add => add
# pixel_dif1 => sub
# pixel_dif2 => sub_1
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_1, %slice_3), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [-3, -2, -1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %slice_8), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_2, [-3, -2, -1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_abs_add_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex % 12
r2 = (rindex // 12)
x0 = xindex
r3 = rindex % 3
r4 = (rindex // 3)
tmp0 = tl.load(in_ptr0 + (4 + r1 + (16*r2) + (64*x0)), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r1 + (16*r2) + (64*x0)), rmask & xmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r3 + (4*r4) + (64*x0)), rmask & xmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r3 + (4*r4) + (64*x0)), rmask & xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask & xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pixel_dif1, abs_1, sum_1, pixel_dif2, abs_2, sum_2, add], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0.run(buf2, arg0_1, 4, 48, grid=grid(4), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def total_variation(img: 'torch.Tensor') ->torch.Tensor:
"""Function that computes Total Variation.
See :class:`~kornia.losses.TotalVariation` for details.
"""
if not torch.is_tensor(img):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(img)}')
img_shape = img.shape
if len(img_shape) == 3 or len(img_shape) == 4:
pixel_dif1 = img[..., 1:, :] - img[..., :-1, :]
pixel_dif2 = img[..., :, 1:] - img[..., :, :-1]
reduce_axes = -3, -2, -1
else:
raise ValueError(
'Expected input tensor to be of ndim 3 or 4, but got ' + str(
len(img_shape)))
return pixel_dif1.abs().sum(dim=reduce_axes) + pixel_dif2.abs().sum(dim
=reduce_axes)
class TotalVariation(nn.Module):
"""Computes the Total Variation according to
[1] https://en.wikipedia.org/wiki/Total_variation
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)` where C = number of classes.
- Output: :math:`(N,)` or :math:`()`
Examples:
>>> kornia.losses.total_variation(torch.ones(3,4,4)) # tensor(0.)
>>> tv = kornia.losses.TotalVariation()
>>> output = tv(torch.ones(2,3,4,4)) # tensor([0., 0.])
>>> output.backward()
"""
def __init__(self) ->None:
super(TotalVariation, self).__init__()
def forward(self, img) ->torch.Tensor:
return total_variation(img)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
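# Illustrative sketch: a constant image has zero total variation, while a
# vertical step edge contributes one unit per row per channel through the
# horizontal differences (pixel_dif2).
def _total_variation_demo():
    flat = torch.ones(3, 4, 4)
    step = torch.cat([torch.zeros(3, 4, 2), torch.ones(3, 4, 2)], dim=-1)
    return total_variation(flat), total_variation(step)  # tensor(0.), tensor(12.)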
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex % 12
r2 = rindex // 12
x0 = xindex
r3 = rindex % 3
r4 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r1 + 16 * r2 + 64 * x0), rmask & xmask,
other=0.0)
tmp1 = tl.load(in_ptr0 + (r1 + 16 * r2 + 64 * x0), rmask & xmask, other=0.0
)
tmp8 = tl.load(in_ptr0 + (1 + r3 + 4 * r4 + 64 * x0), rmask & xmask,
other=0.0)
tmp9 = tl.load(in_ptr0 + (r3 + 4 * r4 + 64 * x0), rmask & xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask & xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0[grid(4)](buf2, arg0_1, 4, 48,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
def total_variation(img: 'torch.Tensor') ->torch.Tensor:
"""Function that computes Total Variation.
See :class:`~kornia.losses.TotalVariation` for details.
"""
if not torch.is_tensor(img):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(img)}')
img_shape = img.shape
if len(img_shape) == 3 or len(img_shape) == 4:
pixel_dif1 = img[..., 1:, :] - img[..., :-1, :]
pixel_dif2 = img[..., :, 1:] - img[..., :, :-1]
reduce_axes = -3, -2, -1
else:
raise ValueError(
'Expected input tensor to be of ndim 3 or 4, but got ' + str(
len(img_shape)))
return pixel_dif1.abs().sum(dim=reduce_axes) + pixel_dif2.abs().sum(dim
=reduce_axes)
class TotalVariationNew(nn.Module):
"""Computes the Total Variation according to
[1] https://en.wikipedia.org/wiki/Total_variation
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)` where C = number of classes.
- Output: :math:`(N,)` or :math:`()`
Examples:
>>> kornia.losses.total_variation(torch.ones(3,4,4)) # tensor(0.)
>>> tv = kornia.losses.TotalVariation()
>>> output = tv(torch.ones(2,3,4,4)) # tensor([0., 0.])
>>> output.backward()
"""
def __init__(self) ->None:
super(TotalVariationNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
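# Reading the kernel above: for one (4, 4, 4) image of the batch there are
# 4 channels * 3 row pairs * 4 columns = 48 vertical differences (indexed by
# r1 = rindex % 12 and r2 = rindex // 12) and 4 channels * 4 rows * 3 column
# pairs = 48 horizontal differences (r3 = rindex % 3, r4 = rindex // 3),
# which is why rnumel == 48 and both partial sums fit in one pass per image.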
|
IEM-Computer-Vision/kornia
|
TotalVariation
| false | 9,262 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
DenseNet2D_up_block_concat
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/64/c64u3fydyraygdta4hpb5tbypd6hsmokokcnxa4tvdwtrww6vrlb.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%_unsafe_index, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 8
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = (xindex // 128)
x4 = xindex % 16
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp6 = tmp5.to(tl.float32)
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = tmp8.to(tl.int32)
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp11 * tmp7
tmp13 = tmp12.to(tl.int32)
tmp14 = tl.load(in_ptr0 + (tmp13 + (4*tmp9) + (16*x2) + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr1 + (x4 + (16*((-4) + x2)) + (64*x3)), tmp15 & xmask, other=0.0)
tmp19 = tl.where(tmp4, tmp14, tmp18)
tl.store(out_ptr0 + (x5), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/wb/cwb3vkj4flzn5u7xzmpimvyu2u44x3a6ojz5jdx3722vu63wj2e7.py
# Topologically Sorted Source Nodes: [conv2d_1, x1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x1 => gt
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/se/csegqqqvt7s4dxdhltx3pc7jrn7mci6yb5efoqyuwi54tarcwqgg.py
# Topologically Sorted Source Nodes: [x21], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x21 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %where], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 12
x0 = xindex % 16
x2 = (xindex // 192)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (128*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 12, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp6 & xmask, other=0.0).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp11 = tl.load(in_ptr3 + ((-8) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.01
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp5, tmp17)
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uh/cuhdgfmdqftwwyx7lnvl7fwehbonjzwctoheihvgrq72kq6esegz.py
# Topologically Sorted Source Nodes: [conv2d_3, out], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# out => gt_1, mul_5, where_1
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_9, %primals_10, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_3, %mul_5), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf3, primals_6, buf4, 256, grid=grid(256), stream=stream0)
buf5 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x21], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf0, buf4, buf3, primals_6, buf5, 768, grid=grid(768), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, primals_8, 256, grid=grid(256), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, out], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf8, primals_10, buf9, buf10, 256, grid=grid(256), stream=stream0)
del buf8
del primals_10
return (buf10, primals_3, primals_5, primals_7, primals_9, buf0, buf2, buf4, buf5, buf7, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 12, 1, 1), (12, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class DenseNet2D_up_block_concat(nn.Module):
def __init__(self, skip_channels, input_channels, output_channels,
up_stride, dropout=False, prob=0):
super(DenseNet2D_up_block_concat, self).__init__()
self.conv11 = nn.Conv2d(skip_channels + input_channels,
output_channels, kernel_size=(1, 1), padding=(0, 0))
self.conv12 = nn.Conv2d(output_channels, output_channels,
kernel_size=(3, 3), padding=(1, 1))
        self.conv21 = nn.Conv2d(skip_channels + input_channels +
            output_channels, output_channels, kernel_size=(1, 1),
            padding=(0, 0))
self.conv22 = nn.Conv2d(output_channels, output_channels,
kernel_size=(3, 3), padding=(1, 1))
self.relu = nn.LeakyReLU()
self.up_stride = up_stride
self.dropout = dropout
self.dropout1 = nn.Dropout(p=prob)
self.dropout2 = nn.Dropout(p=prob)
def forward(self, prev_feature_map, x):
        x = nn.functional.interpolate(x, scale_factor=self.up_stride, mode='nearest')
x = torch.cat((x, prev_feature_map), dim=1)
if self.dropout:
x1 = self.relu(self.dropout1(self.conv12(self.conv11(x))))
x21 = torch.cat((x, x1), dim=1)
out = self.relu(self.dropout2(self.conv22(self.conv21(x21))))
else:
x1 = self.relu(self.conv12(self.conv11(x)))
x21 = torch.cat((x, x1), dim=1)
out = self.relu(self.conv22(self.conv21(x21)))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'skip_channels': 4, 'input_channels': 4,
        'output_channels': 4, 'up_stride': 1}]
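# Editor's note: a minimal shape trace of the block above (illustrative, not
# part of the original repo). With skip/input/output channels of 4 and
# up_stride=1, the first concat yields 8 channels and the second 12, which is
# exactly what the fused cat kernels in this record assume.
def _up_block_shape_example():
    block = DenseNet2D_up_block_concat(4, 4, 4, 1)
    skip, x = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    out = block(skip, x)
    assert out.shape == (4, 4, 4, 4)
    return out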
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 8
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex // 128
x4 = xindex % 16
x5 = xindex
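    # Editor's note: channels 0-3 are read from in_ptr0 through the
    # nearest-neighbour float->int index round-trip below (an identity when
    # scale_factor=1), channels 4-7 are copied from in_ptr1; the kernel thus
    # fuses F.interpolate with torch.cat along dim=1.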
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp6 = tmp5.to(tl.float32)
tmp7 = 1.0
tmp8 = tmp6 * tmp7
tmp9 = tmp8.to(tl.int32)
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp11 * tmp7
tmp13 = tmp12.to(tl.int32)
tmp14 = tl.load(in_ptr0 + (tmp13 + 4 * tmp9 + 16 * x2 + 64 * x3), tmp4 &
xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp18 = tl.load(in_ptr1 + (x4 + 16 * (-4 + x2) + 64 * x3), tmp15 &
xmask, other=0.0)
tmp19 = tl.where(tmp4, tmp14, tmp18)
tl.store(out_ptr0 + x5, tmp19, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
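# Editor's note: the kernel above stores only the boolean mask (conv + bias >
# 0); triton_poi_fused_cat_3 below reloads that mask together with the raw
# conv output and bias, so the LeakyReLU values are materialised once,
# directly inside the concatenated buffer.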
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 128 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 12, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp6 & xmask,
other=0.0).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
    tmp11 = tl.load(in_ptr3 + (-8 + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.01
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp5, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf3,
primals_6, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(768)](buf0, buf4, buf3, primals_6, buf5,
768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_1[grid(256)](buf7, primals_8, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf8 = extern_kernels.convolution(buf7, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = buf3
del buf3
        triton_poi_fused_convolution_leaky_relu_4[grid(256)](buf8,
            primals_10, buf9, buf10, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
del buf8
del primals_10
return (buf10, primals_3, primals_5, primals_7, primals_9, buf0, buf2,
buf4, buf5, buf7, buf9)
class DenseNet2D_up_block_concatNew(nn.Module):
def __init__(self, skip_channels, input_channels, output_channels,
up_stride, dropout=False, prob=0):
super(DenseNet2D_up_block_concatNew, self).__init__()
self.conv11 = nn.Conv2d(skip_channels + input_channels,
output_channels, kernel_size=(1, 1), padding=(0, 0))
self.conv12 = nn.Conv2d(output_channels, output_channels,
kernel_size=(3, 3), padding=(1, 1))
        self.conv21 = nn.Conv2d(skip_channels + input_channels +
            output_channels, output_channels, kernel_size=(1, 1),
            padding=(0, 0))
self.conv22 = nn.Conv2d(output_channels, output_channels,
kernel_size=(3, 3), padding=(1, 1))
self.relu = nn.LeakyReLU()
self.up_stride = up_stride
self.dropout = dropout
self.dropout1 = nn.Dropout(p=prob)
self.dropout2 = nn.Dropout(p=prob)
def forward(self, input_0, input_1):
primals_3 = self.conv11.weight
primals_4 = self.conv11.bias
primals_5 = self.conv12.weight
primals_6 = self.conv12.bias
primals_7 = self.conv21.weight
primals_8 = self.conv21.bias
primals_9 = self.conv22.weight
primals_10 = self.conv22.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
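# Editor's note: a minimal parity sketch (illustrative; assumes a CUDA device,
# since call() launches Triton kernels). Per the fused cat kernel above,
# channels 0-3 of the concat come from the first input, so input_0 is the
# tensor that gets interpolated in this eager replay of the dropout=False path.
def _up_block_parity_check():
    m = DenseNet2D_up_block_concatNew(4, 4, 4, 1).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    xi = nn.functional.interpolate(a, scale_factor=m.up_stride, mode='nearest')
    xc = torch.cat((xi, b), dim=1)
    x1 = m.relu(m.conv12(m.conv11(xc)))
    ref = m.relu(m.conv22(m.conv21(torch.cat((xc, x1), dim=1))))
    assert torch.allclose(m(a, b), ref, atol=1e-5)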
|
KamranBinaee/RGnet
|
DenseNet2D_up_block_concat
| false | 9,263 |
[
"MIT"
] | 0 |
85861ab47a94018c8f8fa01fb7e64d8eec7fdc43
|
https://github.com/KamranBinaee/RGnet/tree/85861ab47a94018c8f8fa01fb7e64d8eec7fdc43
|
Vflip
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/oo/coo4q5pnqu4m2me4f4rovtyxouj7hiofnegnlp34e5heds4hgj3f.py
# Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip]
# Source node to ATen node mapping:
# flip => rev
# Graph fragment:
# %rev : [num_users=1] = call_function[target=torch.ops.prims.rev.default](args = (%arg0_1, [2]), kwargs = {})
triton_poi_fused_flip_0 = async_compile.triton('triton_poi_fused_flip_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_flip_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (12 + x0 + ((-4)*x1) + (16*x2)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip]
stream0 = get_raw_stream(0)
triton_poi_fused_flip_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def vflip(input: 'torch.Tensor') ->torch.Tensor:
"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
"""
return torch.flip(input, [-2])
class Vflip(nn.Module):
"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.vflip(input)
    tensor([[[[0., 1., 1.],
              [0., 0., 0.],
              [0., 0., 0.]]]])
"""
def __init__(self) ->None:
super(Vflip, self).__init__()
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return vflip(input)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
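    # Editor's note: out[x2, x1, x0] = in[x2, 3 - x1, x0]; with an H stride of
    # 4 the source offset is 16 * x2 + 4 * (3 - x1) + x0, i.e. exactly the
    # 12 + x0 - 4 * x1 + 16 * x2 below -- a flip along dim -2, as in
    # torch.flip(input, [-2]) further down.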
tmp0 = tl.load(in_ptr0 + (12 + x0 + -4 * x1 + 16 * x2), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_flip_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def vflip(input: 'torch.Tensor') ->torch.Tensor:
"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
"""
return torch.flip(input, [-2])
class VflipNew(nn.Module):
"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
Examples:
>>> input = torch.tensor([[[
[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]])
>>> kornia.vflip(input)
    tensor([[[[0., 1., 1.],
              [0., 0., 0.],
              [0., 0., 0.]]]])
"""
def __init__(self) ->None:
super(VflipNew, self).__init__()
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
IEM-Computer-Vision/kornia
|
Vflip
| false | 9,264 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
ToLongTensor
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ml/cmltiy5hrv2wqhdqt3fezcfhgimkd7dolxjtsc7omtdmbpeu3exe.py
# Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# tensor => convert_element_type
# Graph fragment:
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.float32), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from typing import List
import torch.nn as nn
class ToLongTensor(nn.Module):
"""Convert a list of integers to long tensor
"""
def __init__(self):
super(ToLongTensor, self).__init__()
def forward(self, tokens: 'List[List[int]]') ->Tensor:
return torch.tensor(tokens)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
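# Editor's note: torch.tensor() infers dtype from its data, so genuine
# List[List[int]] input already yields int64; the float32 copy kernel in the
# compiled version below only appears because tracing used the float inputs
# from get_inputs(). An explicit variant (illustrative) would be:
def _to_long_tensor_example():
    tokens = [[1, 2], [3, 4]]
    t = torch.tensor(tokens, dtype=torch.long)
    assert t.dtype == torch.int64
    return t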
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ToLongTensorNew(nn.Module):
"""Convert a list of integers to long tensor
"""
def __init__(self):
super(ToLongTensorNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
LaudateCorpus1/text-1
|
ToLongTensor
| false | 9,265 |
[
"BSD-3-Clause"
] | 0 |
8808e7eee5a2df79b9566a4a348889dc2722fcfb
|
https://github.com/LaudateCorpus1/text-1/tree/8808e7eee5a2df79b9566a4a348889dc2722fcfb
|
ResidualBlockNoBN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/l3/cl3kktfjbfxvoqsgvjon5fk5ycnqjp7n2a3dk3gk4gw4n2jfe25m.py
# Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => convolution_1
# out_2 => add
# out_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_3), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf3, primals_5, primals_3, buf4, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class ResidualBlockNoBN(nn.Module):
"""
    Residual block in the ResNet style, without batch normalisation.
"""
def __init__(self, in_channels, out_channels, stride=1):
super(ResidualBlockNoBN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=stride, padding=1,
bias=True)
self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=1, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(nn.Conv2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=(1, 1), stride=
stride, bias=False))
def forward(self, x):
out = nn.ReLU()(self.conv1(x))
out = self.conv2(out)
out += self.shortcut(x)
out = nn.ReLU()(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
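# Editor's note: a minimal eager trace of the block above (illustrative). With
# equal in/out channels and stride 1 the shortcut is the identity, which is
# what the fused residual add in the compiled kernels of this record performs.
def _residual_block_example():
    block = ResidualBlockNoBN(4, 4)
    x = torch.rand(4, 4, 4, 4)
    out = block(x)
    assert out.shape == x.shape and (out >= 0).all()
    return out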
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
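    # Editor's note: in_out_ptr0 holds the conv2 output, in_ptr0 its bias and
    # in_ptr1 the residual input (primals_3); tmp8 below records the <= 0 mask
    # that autograd's threshold_backward uses to zero gradients through the
    # final ReLU.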
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf3, primals_5, primals_3, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4
class ResidualBlockNoBNNew(nn.Module):
"""
    Residual block in the ResNet style, without batch normalisation.
"""
def __init__(self, in_channels, out_channels, stride=1):
super(ResidualBlockNoBNNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=stride, padding=1,
bias=True)
self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=1, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(nn.Conv2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=(1, 1), stride=
stride, bias=False))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
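# Usage sketch (added for illustration; not part of the generated module).
# A minimal way to exercise the compiled block, assuming a CUDA device and
# the exact (4, 4, 4, 4) input shape the size/stride guards were traced for.
if __name__ == "__main__":
    model = ResidualBlockNoBNNew(in_channels=4, out_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    # Note: this traced forward hard-codes the identity shortcut, which is
    # only valid for in_channels == out_channels and stride == 1.
    out = model(x)
    print(out.shape)  # torch.Size([4, 4, 4, 4])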
|
LasseWolter/laughter-detection
|
ResidualBlockNoBN
| false | 9,266 |
[
"MIT"
] | 0 |
f0a37f8e991fc57e8bbc846695fc4dea84d60af5
|
https://github.com/LasseWolter/laughter-detection/tree/f0a37f8e991fc57e8bbc846695fc4dea84d60af5
|
RobertaClassificationHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ey/cey6dsgmzj2byupf73e6nwt5fetf5ne2sa57kzcmy7ejvaqhqb72.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf4, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from typing import Optional
class RobertaClassificationHead(nn.Module):
def __init__(self, num_classes, input_dim, inner_dim: 'Optional[int]'=
None, dropout: 'float'=0.1, activation=nn.ReLU):
super().__init__()
if not inner_dim:
inner_dim = input_dim
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
self.activation_fn = activation()
def forward(self, features):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_classes': 4, 'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
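    # Materializes features[:, 0, :] (the first-token slice used by the
    # classification head) as a contiguous buffer ahead of the matmul.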
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
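    # Epilogue for the dense layer: add the bias, apply ReLU in place, and
    # record the `output <= 0` mask for the backward pass.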
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2,
primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
buf2, (16, 4), (4, 1), 0), primals_4, buf4
class RobertaClassificationHeadNew(nn.Module):
def __init__(self, num_classes, input_dim, inner_dim: 'Optional[int]'=
None, dropout: 'float'=0.1, activation=nn.ReLU):
super().__init__()
if not inner_dim:
inner_dim = input_dim
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
self.activation_fn = activation()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
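# Usage sketch (added for illustration; not part of the generated module).
# Assumes a CUDA device and the (4, 4, 4, 4) feature shape from get_inputs();
# dropout does not appear in the traced graph, so this matches eval mode.
if __name__ == "__main__":
    head = RobertaClassificationHeadNew(num_classes=4, input_dim=4).cuda()
    features = torch.rand(4, 4, 4, 4, device="cuda")
    logits = head(features)  # dense + ReLU + out_proj on features[:, 0, :]
    print(logits.shape)  # torch.Size([4, 4, 4])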
|
LaudateCorpus1/text-1
|
RobertaClassificationHead
| false | 9,267 |
[
"BSD-3-Clause"
] | 0 |
8808e7eee5a2df79b9566a4a348889dc2722fcfb
|
https://github.com/LaudateCorpus1/text-1/tree/8808e7eee5a2df79b9566a4a348889dc2722fcfb
|
RgbaToRgb
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/rp/crpkxw776b6qno57vzautehipdh4wlchhe27iw36f33ugyrc6wy3.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1, %getitem_2], -3), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 3
x0 = xindex % 16
x2 = (xindex // 48)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def rgba_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert image from RGBA to RGB.
See :class:`~kornia.color.RgbaToRgb` for details.
Args:
image (torch.Tensor): RGBA Image to be converted to RGB.
Returns:
torch.Tensor: RGB version of the image.
"""
if not torch.is_tensor(image):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)
    # The original source built alpha blends here (a_one * r + a * r, etc.)
    # and discarded the results; those no-op statements are removed because
    # only the concatenated r, g, b channels are returned.
return torch.cat([r, g, b], dim=-3)
class RgbaToRgb(nn.Module):
"""Convert image from RGBA to RGB.
    Removes the alpha channel from an RGBA image.
returns:
torch.Tensor: RGB version of the image.
shape:
- image: :math:`(*, 4, H, W)`
- output: :math:`(*, 3, H, W)`
Examples::
>>> input = torch.rand(2, 4, 4, 5)
>>> rgba = kornia.color.RgbaToRgb()
>>> output = rgba(input) # 2x3x4x5
"""
def __init__(self) ->None:
super(RgbaToRgb, self).__init__()
def forward(self, image: 'torch.Tensor') ->torch.Tensor:
return rgba_to_rgb(image)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
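    # Implements torch.cat([r, g, b], dim=-3): for each output position, x1
    # selects which of the first three input channels to copy, so the alpha
    # channel (index 3) is simply dropped.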
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x0 = xindex % 16
x2 = xindex // 48
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 3, tl.int64)
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def rgba_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert image from RGBA to RGB.
See :class:`~kornia.color.RgbaToRgb` for details.
Args:
image (torch.Tensor): RGBA Image to be converted to RGB.
Returns:
torch.Tensor: RGB version of the image.
"""
if not torch.is_tensor(image):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)
    # The original source built alpha blends here (a_one * r + a * r, etc.)
    # and discarded the results; those no-op statements are removed because
    # only the concatenated r, g, b channels are returned.
return torch.cat([r, g, b], dim=-3)
class RgbaToRgbNew(nn.Module):
"""Convert image from RGBA to RGB.
    Removes the alpha channel from an RGBA image.
returns:
torch.Tensor: RGB version of the image.
shape:
- image: :math:`(*, 4, H, W)`
- output: :math:`(*, 3, H, W)`
Examples::
>>> input = torch.rand(2, 4, 4, 5)
>>> rgba = kornia.color.RgbaToRgb()
>>> output = rgba(input) # 2x3x4x5
"""
def __init__(self) ->None:
super(RgbaToRgbNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
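# Usage sketch (added for illustration; not part of the generated module).
# The size/stride guards in call() specialize this build to a (4, 4, 4, 4)
# CUDA input, so other RGBA shapes would require a recompile.
if __name__ == "__main__":
    rgba = torch.rand(4, 4, 4, 4, device="cuda")
    rgb = RgbaToRgbNew()(rgba)  # drops the alpha channel
    print(rgb.shape)  # torch.Size([4, 3, 4, 4])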
|
IEM-Computer-Vision/kornia
|
RgbaToRgb
| false | 9,268 |
[
"ECL-2.0",
"Apache-2.0"
] | 0 |
f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
|
L2Norm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/az/cazayodov2pejo62rnac6rludmval2wsivw6phwx6vee62yxa77i.py
# Topologically Sorted Source Nodes: [pow_1, sum_1], Original ATen: [aten.pow, aten.sum]
# Source node to ATen node mapping:
# pow_1 => pow_1
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
triton_red_fused_pow_sum_0 = async_compile.triton('triton_red_fused_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = (xindex // 16)
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (2048*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/yb/cybccy6ik3u5wdvejbe7sswchqjx66t2tiskszpk5e3v4xddkzk3.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, clamp, rsqrt], Original ATen: [aten.pow, aten.sum, aten.clamp, aten.rsqrt]
# Source node to ATen node mapping:
# clamp => clamp_min
# pow_1 => pow_1
# rsqrt => rsqrt
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_1, 1e-12), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%clamp_min,), kwargs = {})
triton_per_fused_clamp_pow_rsqrt_sum_1 = async_compile.triton('triton_per_fused_clamp_pow_rsqrt_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.OUTER_TINY,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_pow_rsqrt_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-12
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = libdevice.rsqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/go/cgo6toghmmfwug7mqg2i6moqp5yn5zkdrlyxi73gxriq4acxdhks.py
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %rsqrt), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 512
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 8192)
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (4, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1], Original ATen: [aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_red_fused_pow_sum_0.run(primals_2, buf0, 256, 128, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [pow_1, sum_1, clamp, rsqrt], Original ATen: [aten.pow, aten.sum, aten.clamp, aten.rsqrt]
triton_per_fused_clamp_pow_rsqrt_sum_1.run(buf2, buf0, 64, 4, grid=grid(64), stream=stream0)
del buf0
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_1, primals_2, buf2, buf3, 32768, grid=grid(32768), stream=stream0)
del primals_1
return (buf3, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class L2Norm(nn.Module):
"""
    The scale is learnable, as in the original paper.
    scale: initial scale value
    chan_num: number of channels (the norm is taken over all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2Norm, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, data):
return self.scale * data * data.pow(2).sum(dim=1, keepdim=True).clamp(
min=1e-12).rsqrt()
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
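    # First stage of sum(x**2, dim=1): each program reduces a 128-channel
    # chunk of the 512 channels, producing 4 partial sums per (batch,
    # spatial) position.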
xnumel = 256
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = xindex // 16
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 2048 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_per_fused_clamp_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
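    # Second stage: sum the 4 partial chunk sums, clamp at 1e-12, and take
    # rsqrt to get the reciprocal L2 norm per (batch, spatial) position.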
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-12
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = libdevice.rsqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
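    # Final elementwise stage: scale (indexed by channel x1) * input *
    # reciprocal norm (broadcast across the channel dimension).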
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 512
x3 = xindex
x0 = xindex % 16
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, None)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (4, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 4, 1, 16),
torch.float32)
get_raw_stream(0)
triton_red_fused_pow_sum_0[grid(256)](primals_2, buf0, 256, 128,
XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
triton_per_fused_clamp_pow_rsqrt_sum_1[grid(64)](buf2, buf0, 64, 4,
XBLOCK=64, num_warps=2, num_stages=1)
del buf0
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_mul_2[grid(32768)](primals_1, primals_2, buf2,
buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf3, primals_2, buf2
class L2NormNew(nn.Module):
"""
    The scale is learnable, as in the original paper.
    scale: initial scale value
    chan_num: number of channels (the norm is taken over all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2NormNew, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
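# Usage sketch (added for illustration; not part of the generated module).
# Assumes a CUDA device and the (4, 512, 4, 4) input shape the guards expect.
if __name__ == "__main__":
    norm = L2NormNew().cuda()
    x = torch.rand(4, 512, 4, 4, device="cuda")
    y = norm(x)  # scale * x * rsqrt(clamp(sum(x**2, dim=1, keepdim=True), 1e-12))
    print(y.shape)  # torch.Size([4, 512, 4, 4])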
|
KarthikGanesan88/stonne
|
L2Norm
| false | 9,269 |
[
"MIT"
] | 0 |
f228ade67120b9dafac8ea99d201e269b2ad7099
|
https://github.com/KarthikGanesan88/stonne/tree/f228ade67120b9dafac8ea99d201e269b2ad7099
|
Net
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/pr/cprd2wvus6goxkkguuoaogt4fubqcsb2qrqazrha42zvaiu54afh.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3600
x1 = (xindex // 3600)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(out_ptr0 + (x0 + (3616*x1)), tmp5, xmask)
tl.store(out_ptr1 + (x0 + (3712*x1)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5r/c5r26egf7qeeh7fjfq4ijywkfpebixqtdccifkxqdwcxnx7wx2k3.py
# Topologically Sorted Source Nodes: [x, x_1, x_3], Original ATen: [aten.convolution, aten.relu, aten.view]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# x_3 => view_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %view_1 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu, [-1, 36]), kwargs = {})
triton_poi_fused_convolution_relu_view_1 = async_compile.triton('triton_poi_fused_convolution_relu_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((3616*(x0 // 3600)) + (x0 % 3600)), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bk/cbkgymk7g7ueyo7jgcv5cwmq2tjtwuz5qbpwmenx2x47ruxwdw5e.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_4 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (5, 36), (36, 1))
assert_size_stride(primals_5, (5, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 60, 60), (3600, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 1, 60, 60), (3616, 60, 60, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 60, 60), (3712, 3712, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf0, primals_2, buf1, buf6, 14400, grid=grid(14400), stream=stream0)
del primals_2
buf2 = reinterpret_tensor(buf0, (400, 36), (36, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1, x_3], Original ATen: [aten.convolution, aten.relu, aten.view]
triton_poi_fused_convolution_relu_view_1.run(buf1, buf2, 14400, grid=grid(14400), stream=stream0)
del buf1
buf3 = empty_strided_cuda((400, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (36, 5), (1, 36), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((400, 5), (5, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf5, 2000, grid=grid(2000), stream=stream0)
del primals_5
return (buf4, primals_1, primals_3, buf2, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((5, 36), (36, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, (5, 5), groups=1)
self.relu1 = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(36, 5)
self.relu2 = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = x.view(-1, 36)
x = self.fc1(x)
x = self.relu2(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
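    # Epilogue for conv1: broadcast the single-channel bias, apply ReLU, and
    # write the activation and its `<= 0` mask into padded buffers (per-image
    # strides 3616 / 3712 pad the 3600-element feature maps for alignment).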
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3600
x1 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(out_ptr0 + (x0 + 3616 * x1), tmp5, xmask)
tl.store(out_ptr1 + (x0 + 3712 * x1), tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_view_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
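    # Repacks the padded activation (per-image stride 3616) into a contiguous
    # buffer that is then viewed as (400, 36) for the linear layer.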
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3616 * (x0 // 3600) + x0 % 3600), xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (5, 36), (36, 1))
assert_size_stride(primals_5, (5,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 60, 60), (3600, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 1, 60, 60), (3616, 60, 60, 1), torch.
float32)
buf6 = empty_strided_cuda((4, 1, 60, 60), (3712, 3712, 60, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(14400)](
buf0, primals_2, buf1, buf6, 14400, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
buf2 = reinterpret_tensor(buf0, (400, 36), (36, 1), 0)
del buf0
triton_poi_fused_convolution_relu_view_1[grid(14400)](buf1, buf2,
14400, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((400, 5), (5, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (36, 5), (1,
36), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((400, 5), (5, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(2000)](buf4,
primals_5, buf5, 2000, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_1, primals_3, buf2, buf5, primals_4, buf6
class NetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, (5, 5), groups=1)
self.relu1 = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(36, 5)
self.relu2 = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
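# Usage sketch (added for illustration; not part of the generated module).
# Assumes a CUDA device and the (4, 1, 64, 64) input shape from get_inputs().
if __name__ == "__main__":
    net = NetNew().cuda()
    x = torch.rand(4, 1, 64, 64, device="cuda")
    y = net(x)  # conv 5x5 -> ReLU -> view(-1, 36) -> linear -> ReLU
    print(y.shape)  # torch.Size([400, 5])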
repo_name: KarthikGanesan88/stonne
module_name: Net
synthetic: false
uuid: 9270
licenses: ["MIT"]
stars: 0
sha: f228ade67120b9dafac8ea99d201e269b2ad7099
repo_link: https://github.com/KarthikGanesan88/stonne/tree/f228ade67120b9dafac8ea99d201e269b2ad7099

entry_point: BuildingsModel
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/lp/clp5td7lbqtje3pt7v6xbcp766swgazqemomz2nzsxtdtmjesxht.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
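# --- hedged sketch (not part of the generated code) -----------------------
# Eager equivalent of the extern convolution plus
# triton_poi_fused_convolution_relu_0: the external kernel runs the
# convolution without bias, and this kernel adds the per-channel bias and
# applies ReLU in place. Tensor names are illustrative; a CUDA device is
# assumed:
import torch
import torch.nn.functional as F
x = torch.randn(4, 4, 64, 64, device='cuda')
w = torch.randn(16, 4, 3, 3, device='cuda')
b = torch.randn(16, device='cuda')
y = F.conv2d(x, w, bias=None, padding=1)         # extern_kernels.convolution
y = F.relu(y + b.view(1, -1, 1, 1))              # this fused kernel
# ---------------------------------------------------------------------------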
# kernel path: runs/run_shard_8/inductor_cache/wy/cwyx3wa4jndgnwzcjpr33hhlviahccyeckxfax46ztwjbjc22gd7.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_6 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
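# --- hedged sketch (not part of the generated code) -----------------------
# Reference behaviour of triton_poi_fused_max_pool2d_with_indices_1: a
# 2x2 / stride-2 max pool that also emits, per output element, an int8
# code in {0, 1, 2, 3} naming which window position won the max (the
# low-memory index form kept for the backward pass). Eager equivalent,
# assuming a CUDA device:
import torch
import torch.nn.functional as F
x = torch.randn(4, 16, 64, 64, device='cuda')
vals, idx = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
# vals matches out_ptr0; idx holds flat input indices rather than the
# compact 0..3 window offsets stored in out_ptr1.
# ---------------------------------------------------------------------------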
# kernel path: runs/run_shard_8/inductor_cache/j6/cj6faeofhfnxsh5iuwazughjlau4igyajnmvjequyelq7apzs4qm.py
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_7 => convolution_2
# x_8 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6y/c6yx6oq7oo2cwoaop3iwu5iqfdckg6lycdtu4jjuiv3wdcf2o6p7.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_13 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/d4/cd4s5ogbgu46xbdaa3oicwxi7l6pnddrap26pxiqzcpei77ta53h.py
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_14 => convolution_4
# x_15 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/a4/ca43wvja2n3mesrfuj54dcwx324bk23dhpnatmpi7kjryanvrx2z.py
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_20 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xm/cxmz3iknyzchppdbcm7wr6eaxxzpoahax5jalpj6fh3xvvzjnqzl.py
# Topologically Sorted Source Nodes: [x_21, x_22], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_21 => convolution_6
# x_22 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/f7/cf7o7wnf37yxmpneyxpuue2wp32cye5lrxh45re7d5apq2tbhhro.py
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_27 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/nn/cnnnq4kko5fnwaaj3dyc2icds7tzzfma73fo5puklebufk7h45d3.py
# Topologically Sorted Source Nodes: [x_28, x_29], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_28 => convolution_8
# x_29 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vt/cvtkhpy3oujzvltizkheo3m2lfdt7tarcd6iuke6ssvpf4d4jpmv.py
# Topologically Sorted Source Nodes: [x_35], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_35 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_10, %relu_7], -3), kwargs = {})
triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 256
x0 = xindex % 64
x2 = (xindex // 16384)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (8192*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 256, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (64*((-128) + x1)) + (8192*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
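# --- hedged sketch (not part of the generated code) -----------------------
# triton_poi_fused_cat_9 fuses the transposed-conv bias add into the
# skip-connection concat: channels [0, 128) are the upsampled features
# plus their bias, channels [128, 256) are the matching encoder
# activation. Illustrative eager form, assuming a CUDA device:
import torch
up = torch.randn(4, 128, 8, 8, device='cuda')    # conv_transpose output
b = torch.randn(128, device='cuda')              # its bias
skip = torch.randn(4, 128, 8, 8, device='cuda')  # encoder activation
x = torch.cat([up + b.view(1, -1, 1, 1), skip], dim=-3)  # (4, 256, 8, 8)
# ---------------------------------------------------------------------------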
# kernel path: runs/run_shard_8/inductor_cache/ss/cssj4vemnjgzdwfpoowvbqjaebujsbqhslet4ughio2qw3spidob.py
# Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_44 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_13, %relu_5], -3), kwargs = {})
triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 128
x0 = xindex % 256
x2 = (xindex // 32768)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (16384*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 128, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (256*((-64) + x1)) + (16384*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/fb/cfbh7kxk5krx3bvvif6ukfongvvxoiilpn4l7limujdttm6x4uzt.py
# Topologically Sorted Source Nodes: [x_53], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_53 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %relu_3], -3), kwargs = {})
triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 64
x0 = xindex % 1024
x2 = (xindex // 65536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (32768*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 64, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (1024*((-32) + x1)) + (32768*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/p5/cp54phyfpmyfhebtgeoqahl3sisnla77wis33pm6mlnou73sl5pt.py
# Topologically Sorted Source Nodes: [x_62], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_62 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_19, %relu_1], -3), kwargs = {})
triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 32
x0 = xindex % 4096
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (65536*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 32, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (4096*((-16) + x1)) + (65536*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hc/chcv4sut4x6gkpvyjvxibapsybsyryrojinvhzqnvsvdt24poezp.py
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# z => convolution_22
# Graph fragment:
# %convolution_22 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_17, %primals_46, %primals_47, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
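# --- hedged sketch (not part of the generated code) -----------------------
# triton_poi_fused_convolution_13 only adds the 2-channel bias to the
# output of the final 1x1 classifier convolution; there is no activation
# here, since the softmax kernel below consumes the raw logits.
# Illustrative eager form, assuming a CUDA device:
import torch
import torch.nn.functional as F
feat = torch.randn(4, 16, 64, 64, device='cuda')
w = torch.randn(2, 16, 1, 1, device='cuda')
b = torch.randn(2, device='cuda')
logits = F.conv2d(feat, w, bias=None) + b.view(1, -1, 1, 1)  # (4, 2, 64, 64)
# ---------------------------------------------------------------------------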
# kernel path: runs/run_shard_8/inductor_cache/sn/csni7cuzqtscrh2nxqz7pebg25g5v7fsbrhmusk4hmcn64gabawd.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# a => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution_22, [-3], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_22, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-3], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_14 = async_compile.triton('triton_poi_fused__softmax_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 8192)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x0 + (8192*x2)), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4096 + x0 + (8192*x2)), None, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x3), tmp11, None)
''', device_str='cuda')
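# --- hedged sketch (not part of the generated code) -----------------------
# triton_poi_fused__softmax_14 is a numerically stabilised softmax over
# the 2-channel class dimension, computed pointwise per pixel. Eager
# equivalent, assuming a CUDA device:
import torch
logits = torch.randn(4, 2, 64, 64, device='cuda')
stable = logits - logits.amax(dim=-3, keepdim=True)
probs = stable.exp() / stable.exp().sum(dim=-3, keepdim=True)
# identical to torch.softmax(logits, dim=-3)
# ---------------------------------------------------------------------------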
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (64, ), (1, ))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256, ), (1, ))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256, ), (1, ))
assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (128, ), (1, ))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128, ), (1, ))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_29, (64, ), (1, ))
assert_size_stride(primals_30, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (64, ), (1, ))
assert_size_stride(primals_32, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_33, (64, ), (1, ))
assert_size_stride(primals_34, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_35, (32, ), (1, ))
assert_size_stride(primals_36, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_37, (32, ), (1, ))
assert_size_stride(primals_38, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_39, (32, ), (1, ))
assert_size_stride(primals_40, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_41, (16, ), (1, ))
assert_size_stride(primals_42, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_43, (16, ), (1, ))
assert_size_stride(primals_44, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_45, (16, ), (1, ))
assert_size_stride(primals_46, (2, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_47, (2, ), (1, ))
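    # --- hedged note (not part of the generated code) ----------------------
    # The weight shapes above trace a 4-level U-Net: encoder channels
    # 16 -> 32 -> 64 -> 128 with 2x2 max pools down to a 256-channel
    # bottleneck, then four transposed convolutions back up with skip
    # concats, ending in a 2-channel 1x1 classifier (primals_46/47)
    # followed by the channel softmax kernel.
    # ------------------------------------------------------------------------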
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 262144, grid=grid(262144), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 131072, grid=grid(131072), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 131072, grid=grid(131072), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 16, 16), (16384, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 65536, grid=grid(65536), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_17, x_18], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 65536, grid=grid(65536), stream=stream0)
del primals_13
buf16 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.float32)
buf17 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf15, buf16, buf17, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 128, 8, 8), (8192, 64, 8, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_21, x_22], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf19, primals_15, 32768, grid=grid(32768), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 8, 8), (8192, 64, 8, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_24, x_25], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf21, primals_17, 32768, grid=grid(32768), stream=stream0)
del primals_17
buf22 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf21, buf22, buf23, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 4, 4), (4096, 16, 4, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [x_28, x_29], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf25, primals_19, 16384, grid=grid(16384), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 4, 4), (4096, 16, 4, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [x_31, x_32], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf27, primals_21, 16384, grid=grid(16384), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_35], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf28, primals_23, buf21, buf29, 65536, grid=grid(65536), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [x_37, x_38], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf31, primals_25, 32768, grid=grid(32768), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [x_40, x_41], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf33, primals_27, 32768, grid=grid(32768), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, primals_28, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf34, (4, 64, 16, 16), (16384, 256, 16, 1))
buf35 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.cat]
triton_poi_fused_cat_10.run(buf34, primals_29, buf15, buf35, 131072, grid=grid(131072), stream=stream0)
del buf34
del primals_29
# Topologically Sorted Source Nodes: [x_46], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 64, 16, 16), (16384, 256, 16, 1))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [x_46, x_47], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf37, primals_31, 65536, grid=grid(65536), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [x_49], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 64, 16, 16), (16384, 256, 16, 1))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [x_49, x_50], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf39, primals_33, 65536, grid=grid(65536), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [x_52], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, primals_34, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf40, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf41 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_53], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf40, primals_35, buf9, buf41, 262144, grid=grid(262144), stream=stream0)
del buf40
del primals_35
# Topologically Sorted Source Nodes: [x_55], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [x_55, x_56], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf43, primals_37, 131072, grid=grid(131072), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [x_58], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf43, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf45 = buf44; del buf44 # reuse
# Topologically Sorted Source Nodes: [x_58, x_59], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf45, primals_39, 131072, grid=grid(131072), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [x_61], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_40, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf46, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf47 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_62], Original ATen: [aten.cat]
triton_poi_fused_cat_12.run(buf46, primals_41, buf3, buf47, 524288, grid=grid(524288), stream=stream0)
del buf46
del primals_41
# Topologically Sorted Source Nodes: [x_64], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf49 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [x_64, x_65], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf49, primals_43, 262144, grid=grid(262144), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [x_67], Original ATen: [aten.convolution]
buf50 = extern_kernels.convolution(buf49, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf51 = buf50; del buf50 # reuse
# Topologically Sorted Source Nodes: [x_67, x_68], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf51, primals_45, 262144, grid=grid(262144), stream=stream0)
del primals_45
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf51, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf53 = buf52; del buf52 # reuse
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf53, primals_47, 32768, grid=grid(32768), stream=stream0)
del primals_47
buf54 = reinterpret_tensor(buf28, (4, 2, 64, 64), (8192, 4096, 64, 1), 0); del buf28 # reuse
# Topologically Sorted Source Nodes: [a], Original ATen: [aten._softmax]
triton_poi_fused__softmax_14.run(buf53, buf54, 32768, grid=grid(32768), stream=stream0)
return (buf53, buf54, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, buf54, )
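# Note on the tuple returned by call(): the first two entries are the model
# outputs -- buf53 holds the raw logits z from the 1x1 zconv and buf54 the
# per-pixel softmax probabilities a -- while the remaining weights and
# intermediate activations appear to be kept alive for a potential backward
# pass rather than being part of the user-visible result.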
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((2, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from typing import List
from typing import Tuple
from typing import Union
import torch.nn as nn
class DownSamplingBlock(nn.Module):
def __init__(self, in_channels: 'int', channel_up_factor: 'int'=2,
max_pooling: 'bool'=True, dropout: 'Tuple'=(0, 0)):
super().__init__()
out_channels = in_channels * channel_up_factor
self.max_pooling = max_pooling
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.activation1 = nn.ReLU()
self.dropout1 = nn.Dropout2d(dropout[0])
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.activation2 = nn.ReLU()
self.dropout2 = nn.Dropout2d(dropout[1])
if self.max_pooling:
self.max = nn.MaxPool2d(kernel_size=2, stride=2)
    def forward(self, x: 'Tensor') -> Union[Tensor, Tuple[Tensor, Tensor]]:
x = self.conv1(x)
x = self.activation1(x)
x = self.dropout1(x)
x = self.conv2(x)
x = self.activation2(x)
x = self.dropout2(x)
skip_connection = x
if self.max_pooling:
x = self.max(x)
return x, skip_connection
return x
class UpSamplingBlock(nn.Module):
def __init__(self, in_channels: 'int', channel_down_factor: 'int',
skip_channels: 'int', dropout: 'Tuple'=(0, 0, 0)):
super().__init__()
out_channels = in_channels // channel_down_factor
self.transpose_conv2d = nn.ConvTranspose2d(in_channels,
out_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
self.dropout = nn.Dropout2d(dropout[0])
self.conv1 = nn.Conv2d(out_channels + skip_channels, out_channels,
kernel_size=3, stride=1, padding=1)
self.activation1 = nn.ReLU()
self.dropout1 = nn.Dropout2d(dropout[1])
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.activation2 = nn.ReLU()
self.dropout2 = nn.Dropout2d(dropout[2])
    def forward(self, x: 'Tensor', skip_connection: 'Tensor') -> Tensor:
x = self.transpose_conv2d(x)
x = torch.cat([x, skip_connection], -3)
x = self.dropout(x)
x = self.conv1(x)
x = self.activation1(x)
x = self.dropout1(x)
x = self.conv2(x)
x = self.activation2(x)
x = self.dropout2(x)
return x
class BuildingsModel(nn.Module):
    def __init__(self, in_channels: 'int', init_out_factor: 'int', dropouts: 'List'=[0] * 23):
super().__init__()
self.hooks = {}
self.down_1 = DownSamplingBlock(in_channels, init_out_factor,
dropout=(dropouts[0], dropouts[1]))
self.down_2 = DownSamplingBlock(self.down_1.conv2.out_channels, 2,
dropout=(dropouts[2], dropouts[3]))
self.down_3 = DownSamplingBlock(self.down_2.conv2.out_channels, 2,
dropout=(dropouts[4], dropouts[5]))
self.down_4 = DownSamplingBlock(self.down_3.conv2.out_channels, 2,
dropout=(dropouts[6], dropouts[7]))
self.down_5 = DownSamplingBlock(self.down_4.conv2.out_channels, 2,
max_pooling=False, dropout=(dropouts[8], dropouts[9]))
self.up_1 = UpSamplingBlock(self.down_5.conv2.out_channels, 2,
            skip_channels=self.down_4.conv2.out_channels,
            dropout=(dropouts[10], dropouts[11], dropouts[12]))
self.up_2 = UpSamplingBlock(self.up_1.conv2.out_channels, 2,
            skip_channels=self.down_3.conv2.out_channels,
            dropout=(dropouts[13], dropouts[14], dropouts[15]))
self.up_3 = UpSamplingBlock(self.up_2.conv2.out_channels, 2,
            skip_channels=self.down_2.conv2.out_channels,
            dropout=(dropouts[16], dropouts[17], dropouts[18]))
self.up_4 = UpSamplingBlock(self.up_3.conv2.out_channels, 2,
            skip_channels=self.down_1.conv2.out_channels,
            dropout=(dropouts[19], dropouts[20], dropouts[21]))
self.zconv = nn.Conv2d(self.up_4.conv2.out_channels, out_channels=2,
kernel_size=1)
self.prob = nn.Softmax(dim=-3)
self.__init_weights__()
def __init_weights__(self):
for m in self.modules():
if type(m) in {nn.Conv2d, nn.ConvTranspose2d}:
nn.init.kaiming_normal_(m.weight.data, mode='fan_in',
nonlinearity='relu')
if m.bias is not None:
m.bias.data.fill_(0)
def forward(self, x):
x, skip_connection_4 = self.down_1(x)
x, skip_connection_3 = self.down_2(x)
x, skip_connection_2 = self.down_3(x)
x, skip_connection_1 = self.down_4(x)
x = self.down_5(x)
x = self.up_1(x, skip_connection_1)
x = self.up_2(x, skip_connection_2)
x = self.up_3(x, skip_connection_3)
x = self.up_4(x, skip_connection_4)
z = self.zconv(x)
a = self.prob(z)
return z, a
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4, 'init_out_factor': 4}]
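# A minimal usage sketch (an editorial addition, not part of the original
# repository): it wires get_init_inputs()/get_inputs() into the reference
# BuildingsModel above and checks the output contract. The assertions follow
# from zconv having 2 output channels and prob being nn.Softmax(dim=-3).
def smoke_test_buildings_model():
    args, kwargs = get_init_inputs()
    model = BuildingsModel(*args, **kwargs)
    x, = get_inputs()
    z, a = model(x)
    assert z.shape == (4, 2, 64, 64)  # raw per-pixel logits
    assert a.shape == (4, 2, 64, 64)  # per-pixel class probabilities
    # the softmax over the channel dimension sums to one at every pixel
    assert torch.allclose(a.sum(dim=-3), torch.ones(4, 64, 64))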
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import Tensor
from typing import List
from typing import Tuple
from typing import Union
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
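# The kernel above is Inductor's standard epilogue for Conv2d + bias + ReLU:
# the convolution itself runs through extern_kernels.convolution, and this
# pointwise kernel then adds the per-channel bias (in_ptr0, indexed by the
# channel component x1 of the flattened index) and clamps at zero, in place.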
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
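# 2x2, stride-2 max pooling with indices: out_ptr0 receives each window's
# maximum and out_ptr1 an int8 code in {0, 1, 2, 3} recording which of the
# four window positions won, which is what max_pool2d_with_indices saves for
# the backward pass.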
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 256
x0 = xindex % 64
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 8192 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-128 + x1) + 8192 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
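# Fused decoder concatenation: for output channels x1 < 128 the kernel reads
# the transposed-conv result (in_ptr0) and folds in its bias (in_ptr1); for
# x1 >= 128 it copies the stored skip connection (in_ptr2). The net effect is
# torch.cat([up + bias, skip], dim=-3) without materializing the biased
# tensor separately. Kernels cat_10 through cat_12 repeat the same pattern at
# the other decoder resolutions.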
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 128
x0 = xindex % 256
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-64 + x1) + 16384 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 64
x0 = xindex % 1024
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 32768 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-32 + x1) + 32768 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x0 = xindex % 4096
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 65536 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 32, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-16 + x1) + 65536 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 2
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused__softmax_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + (x0 + 8192 * x2), None, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4096 + x0 + 8192 * x2), None, eviction_policy
='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x3, tmp11, None)
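# Numerically stable softmax over the two output channels: for each pixel the
# kernel loads both logits, subtracts m = max(z0, z1), and computes
# exp(z - m) / (exp(z0 - m) + exp(z1 - m)), i.e. nn.Softmax(dim=-3) applied
# to a 2-channel tensor.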
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (128,), (1,))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_29, (64,), (1,))
assert_size_stride(primals_30, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (64,), (1,))
assert_size_stride(primals_32, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_33, (64,), (1,))
assert_size_stride(primals_34, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_35, (32,), (1,))
assert_size_stride(primals_36, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_37, (32,), (1,))
assert_size_stride(primals_38, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_39, (32,), (1,))
assert_size_stride(primals_40, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_41, (16,), (1,))
assert_size_stride(primals_42, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_43, (16,), (1,))
assert_size_stride(primals_44, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_45, (16,), (1,))
assert_size_stride(primals_46, (2, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_47, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(262144)](buf3, primals_5,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf3, buf4,
buf5, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(131072)](buf7, primals_7,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(131072)](buf9, primals_9,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf9, buf10,
buf11, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 16, 16), (16384, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(65536)](buf13, primals_11,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(65536)](buf15, primals_13,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_13
        buf16 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.float32)
buf17 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf15,
buf16, buf17, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 128, 8, 8), (8192, 64, 8, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_6[grid(32768)](buf19, primals_15,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 8, 8), (8192, 64, 8, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_6[grid(32768)](buf21, primals_17,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
        buf22 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
        buf23 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(8192)](buf21, buf22,
buf23, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 4, 4), (4096, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_8[grid(16384)](buf25, primals_19,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 4, 4), (4096, 16, 4, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_8[grid(16384)](buf27, primals_21,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_21
buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
triton_poi_fused_cat_9[grid(65536)](buf28, primals_23, buf21, buf29,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_6[grid(32768)](buf31, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_6[grid(32768)](buf33, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf34 = extern_kernels.convolution(buf33, primals_28, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf34, (4, 64, 16, 16), (16384, 256, 16, 1))
buf35 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_10[grid(131072)](buf34, primals_29, buf15,
buf35, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del buf34
del primals_29
buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 64, 16, 16), (16384, 256, 16, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_4[grid(65536)](buf37, primals_31,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_31
buf38 = extern_kernels.convolution(buf37, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 64, 16, 16), (16384, 256, 16, 1))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_4[grid(65536)](buf39, primals_33,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_33
buf40 = extern_kernels.convolution(buf39, primals_34, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf40, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf41 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_11[grid(262144)](buf40, primals_35, buf9,
buf41, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del buf40
del primals_35
buf42 = extern_kernels.convolution(buf41, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_2[grid(131072)](buf43, primals_37,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_37
buf44 = extern_kernels.convolution(buf43, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf45 = buf44
del buf44
triton_poi_fused_convolution_relu_2[grid(131072)](buf45, primals_39,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_39
buf46 = extern_kernels.convolution(buf45, primals_40, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf46, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf47 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_12[grid(524288)](buf46, primals_41, buf3,
buf47, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
del buf46
del primals_41
buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_0[grid(262144)](buf49, primals_43,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
buf50 = extern_kernels.convolution(buf49, primals_44, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_0[grid(262144)](buf51, primals_45,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_45
buf52 = extern_kernels.convolution(buf51, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf53 = buf52
del buf52
triton_poi_fused_convolution_13[grid(32768)](buf53, primals_47,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_47
buf54 = reinterpret_tensor(buf28, (4, 2, 64, 64), (8192, 4096, 64,
1), 0)
del buf28
triton_poi_fused__softmax_14[grid(32768)](buf53, buf54, 32768,
XBLOCK=256, num_warps=4, num_stages=1)
return (buf53, buf54, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, primals_20, primals_22, primals_24, primals_26,
primals_28, primals_30, primals_32, primals_34, primals_36,
primals_38, primals_40, primals_42, primals_44, primals_46, buf1,
buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16,
buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31,
buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49,
buf51, buf54)
class DownSamplingBlock(nn.Module):
def __init__(self, in_channels: 'int', channel_up_factor: 'int'=2,
max_pooling: 'bool'=True, dropout: 'Tuple'=(0, 0)):
super().__init__()
out_channels = in_channels * channel_up_factor
self.max_pooling = max_pooling
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.activation1 = nn.ReLU()
self.dropout1 = nn.Dropout2d(dropout[0])
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.activation2 = nn.ReLU()
self.dropout2 = nn.Dropout2d(dropout[1])
if self.max_pooling:
self.max = nn.MaxPool2d(kernel_size=2, stride=2)
    def forward(self, x: 'Tensor') -> Union[Tensor, Tuple[Tensor, Tensor]]:
x = self.conv1(x)
x = self.activation1(x)
x = self.dropout1(x)
x = self.conv2(x)
x = self.activation2(x)
x = self.dropout2(x)
skip_connection = x
if self.max_pooling:
x = self.max(x)
return x, skip_connection
return x
class UpSamplingBlock(nn.Module):
def __init__(self, in_channels: 'int', channel_down_factor: 'int',
skip_channels: 'int', dropout: 'Tuple'=(0, 0, 0)):
super().__init__()
out_channels = in_channels // channel_down_factor
self.transpose_conv2d = nn.ConvTranspose2d(in_channels,
out_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
self.dropout = nn.Dropout2d(dropout[0])
self.conv1 = nn.Conv2d(out_channels + skip_channels, out_channels,
kernel_size=3, stride=1, padding=1)
self.activation1 = nn.ReLU()
self.dropout1 = nn.Dropout2d(dropout[1])
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.activation2 = nn.ReLU()
self.dropout2 = nn.Dropout2d(dropout[2])
    def forward(self, x: 'Tensor', skip_connection: 'Tensor') -> Tensor:
x = self.transpose_conv2d(x)
x = torch.cat([x, skip_connection], -3)
x = self.dropout(x)
x = self.conv1(x)
x = self.activation1(x)
x = self.dropout1(x)
x = self.conv2(x)
x = self.activation2(x)
x = self.dropout2(x)
return x
class BuildingsModelNew(nn.Module):
    def __init__(self, in_channels: 'int', init_out_factor: 'int', dropouts: 'List'=[0] * 23):
super().__init__()
self.hooks = {}
self.down_1 = DownSamplingBlock(in_channels, init_out_factor,
dropout=(dropouts[0], dropouts[1]))
self.down_2 = DownSamplingBlock(self.down_1.conv2.out_channels, 2,
dropout=(dropouts[2], dropouts[3]))
self.down_3 = DownSamplingBlock(self.down_2.conv2.out_channels, 2,
dropout=(dropouts[4], dropouts[5]))
self.down_4 = DownSamplingBlock(self.down_3.conv2.out_channels, 2,
dropout=(dropouts[6], dropouts[7]))
self.down_5 = DownSamplingBlock(self.down_4.conv2.out_channels, 2,
max_pooling=False, dropout=(dropouts[8], dropouts[9]))
self.up_1 = UpSamplingBlock(self.down_5.conv2.out_channels, 2,
            skip_channels=self.down_4.conv2.out_channels,
            dropout=(dropouts[10], dropouts[11], dropouts[12]))
self.up_2 = UpSamplingBlock(self.up_1.conv2.out_channels, 2,
            skip_channels=self.down_3.conv2.out_channels,
            dropout=(dropouts[13], dropouts[14], dropouts[15]))
self.up_3 = UpSamplingBlock(self.up_2.conv2.out_channels, 2,
            skip_channels=self.down_2.conv2.out_channels,
            dropout=(dropouts[16], dropouts[17], dropouts[18]))
self.up_4 = UpSamplingBlock(self.up_3.conv2.out_channels, 2,
            skip_channels=self.down_1.conv2.out_channels,
            dropout=(dropouts[19], dropouts[20], dropouts[21]))
self.zconv = nn.Conv2d(self.up_4.conv2.out_channels, out_channels=2,
kernel_size=1)
self.prob = nn.Softmax(dim=-3)
self.__init_weights__()
def __init_weights__(self):
for m in self.modules():
if type(m) in {nn.Conv2d, nn.ConvTranspose2d}:
nn.init.kaiming_normal_(m.weight.data, mode='fan_in',
nonlinearity='relu')
if m.bias is not None:
m.bias.data.fill_(0)
def forward(self, input_0):
primals_1 = self.down_1.conv1.weight
primals_2 = self.down_1.conv1.bias
primals_4 = self.down_1.conv2.weight
primals_5 = self.down_1.conv2.bias
primals_6 = self.down_2.conv1.weight
primals_7 = self.down_2.conv1.bias
primals_8 = self.down_2.conv2.weight
primals_9 = self.down_2.conv2.bias
primals_10 = self.down_3.conv1.weight
primals_11 = self.down_3.conv1.bias
primals_12 = self.down_3.conv2.weight
primals_13 = self.down_3.conv2.bias
primals_14 = self.down_4.conv1.weight
primals_15 = self.down_4.conv1.bias
primals_16 = self.down_4.conv2.weight
primals_17 = self.down_4.conv2.bias
primals_18 = self.down_5.conv1.weight
primals_19 = self.down_5.conv1.bias
primals_20 = self.down_5.conv2.weight
primals_21 = self.down_5.conv2.bias
primals_22 = self.up_1.transpose_conv2d.weight
primals_23 = self.up_1.transpose_conv2d.bias
primals_24 = self.up_1.conv1.weight
primals_25 = self.up_1.conv1.bias
primals_26 = self.up_1.conv2.weight
primals_27 = self.up_1.conv2.bias
primals_28 = self.up_2.transpose_conv2d.weight
primals_29 = self.up_2.transpose_conv2d.bias
primals_30 = self.up_2.conv1.weight
primals_31 = self.up_2.conv1.bias
primals_32 = self.up_2.conv2.weight
primals_33 = self.up_2.conv2.bias
primals_34 = self.up_3.transpose_conv2d.weight
primals_35 = self.up_3.transpose_conv2d.bias
primals_36 = self.up_3.conv1.weight
primals_37 = self.up_3.conv1.bias
primals_38 = self.up_3.conv2.weight
primals_39 = self.up_3.conv2.bias
primals_40 = self.up_4.transpose_conv2d.weight
primals_41 = self.up_4.transpose_conv2d.bias
primals_42 = self.up_4.conv1.weight
primals_43 = self.up_4.conv1.bias
primals_44 = self.up_4.conv2.weight
primals_45 = self.up_4.conv2.bias
primals_46 = self.zconv.weight
primals_47 = self.zconv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0], output[1]
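# BuildingsModelNew keeps the reference module's parameters and weight
# initialization but routes forward() through the compiled call() above. The
# Dropout2d modules have no counterpart in the compiled graph, which is
# consistent with the default dropouts=[0] * 23 making each of them an
# identity at trace time.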
|
JosefDoun/Ikonos-2-Building-Segmentation-U-Net
|
BuildingsModel
| false | 9,271 |
[
"MIT"
] | 0 |
fecb9874dbf74886fd30d00b8561dfc66886be8c
|
https://github.com/JosefDoun/Ikonos-2-Building-Segmentation-U-Net/tree/fecb9874dbf74886fd30d00b8561dfc66886be8c
|
DuelingQNetwork
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [val], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# val => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
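# The kernel above fuses the linear layer's bias add with ReLU (written back
# into in_out_ptr0) and additionally emits the boolean mask (activation <= 0)
# that aten.threshold_backward later uses to zero the matching gradients.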
# kernel path: runs/run_shard_8/inductor_cache/7q/c7q4bwj4hxfdsged3oobb6yehaxytrqsbcvu6n6kmgwwpgq4o2zm.py
# Topologically Sorted Source Nodes: [add, mean, sub], Original ATen: [aten.add, aten.mean, aten.sub]
# Source node to ATen node mapping:
# add => add
# mean => mean
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_11), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%view_11,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_per_fused_add_mean_sub_1 = async_compile.triton('triton_per_fused_add_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sub_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r2 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (0))
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp7 = tmp4 + tmp6
tmp8 = tmp7 + tmp0
tmp9 = 256.0
tmp10 = tmp3 / tmp9
tmp11 = tmp8 - tmp10
tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp11, None)
''', device_str='cuda')
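# Dueling-head aggregation: with val = V(s) (one scalar per position,
# broadcast via r2 = rindex // 4, and with the value stream's output bias
# primals_7 folded in here) and adv = A(s, a), the kernel computes
# q = (val + adv) - adv.mean(). Note that the mean is taken over the whole
# flattened advantage tensor (all 256 elements), exactly as the traced graph
# (add -> mean -> sub) specifies.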
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (128, 4), (4, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (128, 128), (128, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [val], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf15, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [val_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf14, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), out=buf4)
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 128), (1, 4), 0), out=buf5)
del primals_8
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf5 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [adv], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf6, primals_9, buf13, 8192, grid=grid(8192), stream=stream0)
del primals_9
buf7 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (64, 128), (128, 1), 0), reinterpret_tensor(primals_10, (128, 128), (1, 128), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf7 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [adv_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf8, primals_11, buf12, 8192, grid=grid(8192), stream=stream0)
del primals_11
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [adv_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 128), (128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf9)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, sub], Original ATen: [aten.add, aten.mean, aten.sub]
triton_per_fused_add_mean_sub_1.run(buf9, buf4, primals_7, buf11, 1, 256, grid=grid(1), stream=stream0)
del buf4
del buf9
del primals_7
return (buf11, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(buf6, (64, 128), (128, 1), 0), reinterpret_tensor(buf8, (64, 128), (128, 1), 0), primals_12, buf12, primals_10, buf13, primals_6, buf14, primals_4, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class DuelingQNetwork(nn.Module):
def __init__(self, state_size, action_size, hidsize1=128, hidsize2=128):
super(DuelingQNetwork, self).__init__()
self.fc1_val = nn.Linear(state_size, hidsize1)
self.fc2_val = nn.Linear(hidsize1, hidsize2)
self.fc4_val = nn.Linear(hidsize2, 1)
self.fc1_adv = nn.Linear(state_size, hidsize1)
self.fc2_adv = nn.Linear(hidsize1, hidsize2)
self.fc4_adv = nn.Linear(hidsize2, action_size)
def forward(self, x):
val = F.relu(self.fc1_val(x))
val = F.relu(self.fc2_val(val))
val = self.fc4_val(val)
adv = F.relu(self.fc1_adv(x))
adv = F.relu(self.fc2_adv(adv))
adv = self.fc4_adv(adv)
return val + adv - adv.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
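# Hedged usage sketch (added for illustration; not part of the original repo).
# It exercises the shapes from get_inputs()/get_init_inputs() above: the
# (..., 1) value stream broadcasts against the (..., action_size) advantages.
if __name__ == "__main__":
    net = DuelingQNetwork(state_size=4, action_size=4)
    q = net(torch.rand([4, 4, 4, 4]))
    print(q.shape)  # torch.Size([4, 4, 4, 4])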
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
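# The kernel below fuses the linear bias add with ReLU in place and also
# stores the (activation <= 0) mask that the backward pass reuses.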
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
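# The kernel below fuses the dueling combination val + adv - adv.mean(): it
# sums the 256 advantage values into a mean, adds the fc4_val bias to the
# value stream, and subtracts the mean from val + adv in one pass.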
@triton.jit
def triton_per_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp7 = tmp4 + tmp6
tmp8 = tmp7 + tmp0
tmp9 = 256.0
tmp10 = tmp3 / tmp9
tmp11 = tmp8 - tmp10
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp11, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (128, 4), (4, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (128, 128), (128, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf15, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf14 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3,
primals_5, buf14, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), out=buf4)
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 128), (1, 4), 0), out=buf5)
del primals_8
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf5
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf6,
primals_9, buf13, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_10, (128, 128), (1, 128), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf7
buf12 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf8,
primals_11, buf12, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf9)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_mean_sub_1[grid(1)](buf9, buf4, primals_7,
buf11, 1, 256, num_warps=2, num_stages=1)
del buf4
del buf9
del primals_7
    return (buf11, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
        reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
        reinterpret_tensor(buf6, (64, 128), (128, 1), 0),
        reinterpret_tensor(buf8, (64, 128), (128, 1), 0),
        primals_12, buf12, primals_10, buf13, primals_6, buf14, primals_4, buf15)
class DuelingQNetworkNew(nn.Module):
def __init__(self, state_size, action_size, hidsize1=128, hidsize2=128):
super(DuelingQNetworkNew, self).__init__()
self.fc1_val = nn.Linear(state_size, hidsize1)
self.fc2_val = nn.Linear(hidsize1, hidsize2)
self.fc4_val = nn.Linear(hidsize2, 1)
self.fc1_adv = nn.Linear(state_size, hidsize1)
self.fc2_adv = nn.Linear(hidsize1, hidsize2)
self.fc4_adv = nn.Linear(hidsize2, action_size)
def forward(self, input_0):
primals_1 = self.fc1_val.weight
primals_2 = self.fc1_val.bias
primals_4 = self.fc2_val.weight
primals_5 = self.fc2_val.bias
primals_6 = self.fc4_val.weight
primals_7 = self.fc4_val.bias
primals_8 = self.fc1_adv.weight
primals_9 = self.fc1_adv.bias
primals_10 = self.fc2_adv.weight
primals_11 = self.fc2_adv.bias
primals_12 = self.fc4_adv.weight
primals_13 = self.fc4_adv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
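# Hedged parity sketch (added; assumes a CUDA device is available). With the
# module's own weights, the fused call above should reproduce the eager
# DuelingQNetwork forward pass within floating-point tolerance.
def _check_parity(tol=1e-5):
    import torch.nn.functional as F
    torch.manual_seed(0)
    net = DuelingQNetworkNew(4, 4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    val = net.fc4_val(F.relu(net.fc2_val(F.relu(net.fc1_val(x)))))
    adv = net.fc4_adv(F.relu(net.fc2_adv(F.relu(net.fc1_adv(x)))))
    return torch.allclose(net(x), val + adv - adv.mean(), atol=tol)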
|
LuckUVeryX/flatland-kit
|
DuelingQNetwork
| false | 9,272 |
[
"MIT"
] | 0 |
3127c072b2f26fa0a0f4b45888672c11b80acfd3
|
https://github.com/LuckUVeryX/flatland-kit/tree/3127c072b2f26fa0a0f4b45888672c11b80acfd3
|
EmbedNoise
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/eq/ceqlzwqlgqnd5jih74vktx4hlpfsbm6lcksehyx7mnuogxj6jtqg.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# out_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 16384, grid=grid(16384), stream=stream0)
del buf0
del primals_2
return (reinterpret_tensor(buf2, (64, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def _sn_to_specnorm(sn: 'int'):
if sn > 0:
def specnorm(module):
return nn.utils.spectral_norm(module, n_power_iterations=sn)
else:
def specnorm(module, **kw):
return module
return specnorm
class EmbedNoise(nn.Module):
def __init__(self, z_dim, channels, dim=4, sn=0):
super().__init__()
specnorm = _sn_to_specnorm(sn)
self.pad = nn.Linear(z_dim, channels * dim * dim * dim)
self.pad = specnorm(self.pad)
self.nonlin = nn.LeakyReLU()
self.z_dim = z_dim
self.channels = channels
self.dim = dim
def forward(self, z):
out = self.pad(z)
out = self.nonlin(out)
out = out.view((-1, self.channels, self.dim, self.dim, self.dim))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'z_dim': 4, 'channels': 4}]
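# Hedged usage sketch (added for illustration). The Linear pad expands each
# 4-dim z vector to channels * dim**3 = 256 features, and the view folds the
# leading axes into a (batch, channels, dim, dim, dim) noise volume.
if __name__ == "__main__":
    embed = EmbedNoise(z_dim=4, channels=4)
    out = embed(torch.rand([4, 4, 4, 4]))
    print(out.shape)  # torch.Size([64, 4, 4, 4, 4])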
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
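# The kernel below fuses the bias add with LeakyReLU (negative slope 0.01),
# storing both the pre-activation > 0 mask for the backward pass and the
# activated values.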
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(16384)](buf0, primals_2, buf1,
buf2, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
    return (reinterpret_tensor(buf2, (64, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1)
def _sn_to_specnorm(sn: 'int'):
if sn > 0:
def specnorm(module):
return nn.utils.spectral_norm(module, n_power_iterations=sn)
else:
def specnorm(module, **kw):
return module
return specnorm
class EmbedNoiseNew(nn.Module):
def __init__(self, z_dim, channels, dim=4, sn=0):
super().__init__()
specnorm = _sn_to_specnorm(sn)
self.pad = nn.Linear(z_dim, channels * dim * dim * dim)
self.pad = specnorm(self.pad)
self.nonlin = nn.LeakyReLU()
self.z_dim = z_dim
self.channels = channels
self.dim = dim
def forward(self, input_0):
primals_1 = self.pad.weight
primals_2 = self.pad.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
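# Hedged note (added): EmbedNoiseNew returns only output[0], the reshaped
# activations; the remaining tensors from call() are kept for the backward
# pass.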
|
KirillShmilovich/coarse2fine_VAE
|
EmbedNoise
| false | 9,273 |
[
"MIT"
] | 0 |
e4c1022f9570934a2be59ea0989c80102dc46ad4
|
https://github.com/KirillShmilovich/coarse2fine_VAE/tree/e4c1022f9570934a2be59ea0989c80102dc46ad4
|
LayerNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/dg/cdgw6x7nju4bzp2wyuwgeanbco7zcjis6yiusovvnpz6zw3yjd3l.py
# Topologically Sorted Source Nodes: [u, sub], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# sub => sub
# u => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qa/cqanrp6ysxh6sybzulc3onfaha6cuqejs54bwpkhct7ohd5rdj6b.py
# Topologically Sorted Source Nodes: [pow_1, s, add, sqrt, x, mul, add_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_1 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u, sub], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add, sqrt, x, mul, add_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_1.run(primals_2, buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.optim
class LayerNorm(nn.Module):
"""Construct a layernorm module in the OpenAI style (epsilon inside the square root)."""
def __init__(self, n_state, e=1e-05):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_state': 4}]
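# Hedged check (added; an equivalence claim, not from the original repo):
# with a biased variance and epsilon inside the square root, this should
# match torch.nn.functional.layer_norm over the last dimension.
if __name__ == "__main__":
    ln = LayerNorm(4)
    x = torch.rand([4, 4, 4, 4])
    ref = torch.nn.functional.layer_norm(x, (4,), ln.g, ln.b, ln.e)
    print(torch.allclose(ln(x), ref, atol=1e-6))  # expected: True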
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
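# The first kernel below recomputes the mean over the last dimension (size 4)
# from four strided loads and subtracts it elementwise.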
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
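# The second kernel fuses the variance, sqrt(var + eps), the division, and
# the affine g * x + b into a single elementwise pass over the residuals.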
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_2,
buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormNew(nn.Module):
"""Construct a layernorm module in the OpenAI style (epsilon inside the square root)."""
def __init__(self, n_state, e=1e-05):
super(LayerNormNew, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, input_0):
primals_2 = self.g
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
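# Hedged note (added): LayerNormNew is expected to agree with the eager
# LayerNorm above, since both place epsilon inside the square root and use
# the biased variance over the last dimension.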
|
LouisCastricato/comet-commonsense
|
LayerNorm
| false | 9,274 |
[
"Apache-2.0"
] | 0 |
dd27c0f1f4a5cc75a11329611721a21a0f5a049f
|
https://github.com/LouisCastricato/comet-commonsense/tree/dd27c0f1f4a5cc75a11329611721a21a0f5a049f
|
GCT
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/vt/cvtlwsahn5oxfhga3dwjdswt3bhkvbh4zkaufpwgeopelblrwpnw.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, pow_2], Original ATen: [aten.pow, aten.sum, aten.add]
# Source node to ATen node mapping:
# add => add
# pow_1 => pow_1
# pow_2 => pow_2
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-05), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
triton_per_fused_add_pow_sum_0 = async_compile.triton('triton_per_fused_add_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bk/cbk3oabqcrqeiz2rnrvk4rxkdmk4ef46ye5mdaspijuw6pzegair.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# embedding => mul
# mean => mean
# pow_3 => pow_3
# pow_4 => pow_4
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
triton_poi_fused_add_mean_mul_pow_1 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3))
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tl.store(out_ptr0 + (x0), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ni/cnixk7pwsdlgsjxwsmpbrzopu3rzfg26mvtp5cemfxo35lnktysi.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# embedding => mul
# gate => add_3
# mean => mean
# mul_1 => mul_1
# norm => div
# pow_3 => pow_3
# pow_4 => pow_4
# tanh => tanh
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_tanh_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 / tmp4
tmp6 = tmp2 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vz/cvzd7uxhjcuuvhocpajasfl7npgg3w4tjtzribzneczbe5yv6xr6.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate, mul_2], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# embedding => mul
# gate => add_3
# mean => mean
# mul_1 => mul_1
# mul_2 => mul_2
# norm => div
# pow_3 => pow_3
# pow_4 => pow_4
# tanh => tanh
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %add_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_tanh_3 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, pow_2], Original ATen: [aten.pow, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_pow_sum_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add]
triton_poi_fused_add_mean_mul_pow_1.run(buf1, primals_2, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
triton_poi_fused_add_div_mean_mul_pow_tanh_2.run(buf1, primals_2, primals_3, buf2, primals_4, buf3, 16, grid=grid(16), stream=stream0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate, mul_2], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
triton_poi_fused_add_div_mean_mul_pow_tanh_3.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, primals_1, primals_2, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import sys
import torch
import torch.nn as nn
import torch.utils.data.distributed
class GCT(nn.Module):
    def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False):
super(GCT, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, x):
if self.mode == 'l2':
embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon
).pow(0.5) * self.alpha
norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) +
self.epsilon).pow(0.5)
elif self.mode == 'l1':
if not self.after_relu:
_x = torch.abs(x)
else:
_x = x
embedding = _x.sum((2, 3), keepdim=True) * self.alpha
            norm = self.gamma / (torch.abs(embedding).mean(dim=1,
                keepdim=True) + self.epsilon)
else:
            # unsupported mode; fall through and exit
sys.exit()
gate = 1.0 + torch.tanh(embedding * norm + self.beta)
return x * gate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
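# Hedged usage sketch (added for illustration). In 'l2' mode the gate
# 1 + tanh(...) is a per-channel scalar in (0, 2), so the output keeps the
# input's shape.
if __name__ == "__main__":
    gct = GCT(num_channels=4)
    x = torch.rand([4, 4, 4, 4])
    print(gct(x).shape)  # torch.Size([4, 4, 4, 4])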
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
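# The four kernels below stage the 'l2' branch of GCT: kernel 0 reduces each
# (H, W) plane to its L2 embedding, kernels 1 and 2 build the tanh gate from
# alpha, gamma, and beta, and kernel 3 applies the gate channelwise.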
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 1)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + 2)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
    tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + 3)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tl.store(out_ptr0 + x0, tmp27, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 / tmp4
tmp6 = tmp2 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
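    # Stage 4: broadcast the per-(batch, channel) gate over the 4x4 spatial dims
    # and multiply with the original input.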
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_pow_sum_0[grid(16)](buf1, primals_1, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_poi_fused_add_mean_mul_pow_1[grid(4)](buf1, primals_2, buf2,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_2[grid(16)](buf1,
primals_2, primals_3, buf2, primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_3[grid(256)](primals_1,
buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
return buf4, primals_1, primals_2, primals_3, primals_4, buf1
class GCTNew(nn.Module):
def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
):
super(GCTNew, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.gamma
primals_4 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
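# Minimal parity sketch (assumptions: a CUDA device is available and the eager GCT
# reference class from the listing above is importable next to GCTNew):
def _check_gct_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref, fused = GCT(num_channels=4).cuda(), GCTNew(num_channels=4).cuda()
    for p in ref.parameters():
        p.data.uniform_(-1.0, 1.0)  # avoid the trivial gamma=0 identity case
    fused.load_state_dict(ref.state_dict())
    assert torch.allclose(ref(x), fused(x), atol=1e-06)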
|
Erfun76/insightface
|
GCT
| false | 9,275 |
[
"MIT"
] | 0 |
148cef36a43a055f68d2b6a475f4aa38625ad8b4
|
https://github.com/Erfun76/insightface/tree/148cef36a43a055f68d2b6a475f4aa38625ad8b4
|
RingLoss
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/li/cli63qqsdizise6efdfq5upeadzy7c4nnnl6bxsonf2qrbibqd6n.py
# Topologically Sorted Source Nodes: [norm, sub, pow_1, l, mul], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# l => mean
# mul => mul
# norm => pow_1, pow_2, sum_1
# pow_1 => pow_3
# sub => sub
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_4, 2.0), kwargs = {})
triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp12 = tl.load(in_ptr1 + (0))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp14 = tmp11 - tmp13
tmp15 = 2.0
tmp16 = tmp14 * tmp15
tmp17 = tmp14 * tmp14
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp20 / tmp21
tmp23 = 1.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr1 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp16, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [norm, sub, pow_1, l, mul], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0.run(buf3, primals_1, primals_2, buf2, 1, 64, grid=grid(1), stream=stream0)
del primals_1
del primals_2
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
from torch import nn
class RingLoss(nn.Module):
"""Ring loss.
Reference:
Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018.
"""
def __init__(self, weight_ring=1.0):
super(RingLoss, self).__init__()
self.radius = nn.Parameter(torch.ones(1, dtype=torch.float))
self.weight_ring = weight_ring
def forward(self, x):
l = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean()
return l * self.weight_ring
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
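# Sketch of the arithmetic (illustrative, not part of the repo): dim 1 of the 4-D
# input is the feature dimension, so each of the 4 * 4 * 4 = 64 (n, h, w) positions
# contributes one L2 norm and the loss is the mean squared deviation from radius.
def _ring_loss_sketch(x, radius):
    norms = x.pow(2).sum(dim=1).sqrt()
    return ((norms - radius) ** 2).mean()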
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
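    # Fused ring loss: per (n, h, w) position, the L2 norm over the 4 channels, then
    # mean((norm - radius)^2) over all 64 positions. out_ptr1 also saves
    # 2 * (norm - radius), which the backward pass reuses.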
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr1 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp14 = tmp11 - tmp13
tmp15 = 2.0
tmp16 = tmp14 * tmp15
tmp17 = tmp14 * tmp14
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp20 / tmp21
tmp23 = 1.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0[grid(1)](buf3,
primals_1, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_1
del primals_2
return buf3, buf2
class RingLossNew(nn.Module):
"""Ring loss.
Reference:
Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018.
"""
def __init__(self, weight_ring=1.0):
super(RingLossNew, self).__init__()
self.radius = nn.Parameter(torch.ones(1, dtype=torch.float))
self.weight_ring = weight_ring
def forward(self, input_0):
primals_2 = self.radius
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
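# Minimal parity sketch (assumptions: CUDA is available and the eager RingLoss from
# the listing above is importable). Both modules initialize radius to ones(1), so a
# fresh comparison needs no state copy.
def _check_ring_loss_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref, fused = RingLoss().cuda(), RingLossNew().cuda()
    assert torch.allclose(ref(x), fused(x), atol=1e-06)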
|
Luxios22/Dual_Norm
|
RingLoss
| false | 9,276 |
[
"MIT"
] | 0 |
b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
https://github.com/Luxios22/Dual_Norm/tree/b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
InterpolationBlock
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zk/czk6raopzmdgdv6d5hvyxoik5jp7pujsiqh34ikcwzvkv5yezwe7.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 1.0), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = triton_helpers.minimum(tmp23, tmp2)
tmp25 = tmp20 * tmp24
tmp26 = tmp16 + tmp25
tmp27 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp24
tmp31 = tmp27 + tmp30
tmp32 = tmp26 - tmp31
tmp33 = tmp6.to(tl.float32)
tmp34 = tmp5 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = triton_helpers.minimum(tmp35, tmp2)
tmp37 = tmp32 * tmp36
tmp38 = tmp31 + tmp37
tl.store(in_out_ptr0 + (x4), tmp38, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.utils.data.distributed
class InterpolationBlock(nn.Module):
"""
Interpolation upsampling block.
    Parameters
----------
scale_factor : float
Multiplier for spatial size.
mode : str, default 'bilinear'
Algorithm used for upsampling.
align_corners : bool, default True
Whether to align the corner pixels of the input and output tensors.
"""
def __init__(self, scale_factor, mode='bilinear', align_corners=True):
super(InterpolationBlock, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
return F.interpolate(input=x, scale_factor=self.scale_factor, mode=
self.mode, align_corners=self.align_corners)
def __repr__(self):
s = (
'{name}(scale_factor={scale_factor}, mode={mode}, align_corners={align_corners})'
)
return s.format(name=self.__class__.__name__, scale_factor=self.
scale_factor, mode=self.mode, align_corners=self.align_corners)
def calc_flops(self, x):
assert x.shape[0] == 1
if self.mode == 'bilinear':
num_flops = 9 * x.numel()
else:
num_flops = 4 * x.numel()
num_macs = 0
return num_flops, num_macs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale_factor': 1.0}]
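# Reading aid (illustrative): with the default init arguments above (scale_factor=1.0,
# align_corners=True), every output pixel maps exactly onto an input pixel, so the
# block is an identity on the sample input; calc_flops counts 9 FLOPs per output
# element for the bilinear 4-tap blend.
def _interpolation_identity_sketch():
    x = torch.rand(1, 4, 4, 4)
    block = InterpolationBlock(scale_factor=1.0)
    assert torch.allclose(block(x), x)
    assert block.calc_flops(x) == (9 * x.numel(), 0)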
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
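    # Fused bilinear resample (align_corners=True): gather the four neighboring input
    # pixels per output pixel and blend with the fractional row/column weights; at
    # scale_factor=1.0 the fractional weights are zero and this reduces to a copy.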
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = triton_helpers.minimum(tmp23, tmp2)
tmp25 = tmp20 * tmp24
tmp26 = tmp16 + tmp25
tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp24
tmp31 = tmp27 + tmp30
tmp32 = tmp26 - tmp31
tmp33 = tmp6.to(tl.float32)
tmp34 = tmp5 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = triton_helpers.minimum(tmp35, tmp2)
tmp37 = tmp32 * tmp36
tmp38 = tmp31 + tmp37
tl.store(in_out_ptr0 + x4, tmp38, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(256)](buf1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class InterpolationBlockNew(nn.Module):
"""
Interpolation upsampling block.
    Parameters
----------
scale_factor : float
Multiplier for spatial size.
mode : str, default 'bilinear'
Algorithm used for upsampling.
align_corners : bool, default True
Whether to align the corner pixels of the input and output tensors.
"""
def __init__(self, scale_factor, mode='bilinear', align_corners=True):
super(InterpolationBlockNew, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def __repr__(self):
s = (
'{name}(scale_factor={scale_factor}, mode={mode}, align_corners={align_corners})'
)
return s.format(name=self.__class__.__name__, scale_factor=self.
scale_factor, mode=self.mode, align_corners=self.align_corners)
def calc_flops(self, x):
assert x.shape[0] == 1
if self.mode == 'bilinear':
num_flops = 9 * x.numel()
else:
num_flops = 4 * x.numel()
num_macs = 0
return num_flops, num_macs
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
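# Minimal parity sketch (assumptions: CUDA is available; note the compiled call is
# specialized to the (4, 4, 4, 4) trace-time shape and to scale_factor=1.0):
def _check_interpolation_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    fused = InterpolationBlockNew(scale_factor=1.0)
    ref = torch.nn.functional.interpolate(x, scale_factor=1.0, mode='bilinear',
        align_corners=True)
    assert torch.allclose(fused(x), ref)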
|
Erfun76/insightface
|
InterpolationBlock
| false | 9,277 |
[
"MIT"
] | 0 |
148cef36a43a055f68d2b6a475f4aa38625ad8b4
|
https://github.com/Erfun76/insightface/tree/148cef36a43a055f68d2b6a475f4aa38625ad8b4
|
Net
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
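    # Conv epilogue fused into one pass: add the per-channel bias (x1 indexes the
    # 64 output channels) and apply ReLU in place on the convolution result.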
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x1_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
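    # 2x2 / stride-2 max pool over the 64x64 maps: stores the max value plus a 2-bit
    # argmax code (as int8) per window, kept for the pooling backward.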
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/gk/cgkeox2n6lb2433ix673nzbipppptbujeertkjua6x5dbykrv7hm.py
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/gq/cgqvpghyzwkbypvbvlyig6ohuplw6qvhn73hkwj6auj5e2m5mqio.py
# Topologically Sorted Source Nodes: [x2_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x2_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/j3/cj3zbji3l46wtxryvc5jqoh2q37gqucxaht67n3y6zuw4jwd64p7.py
# Topologically Sorted Source Nodes: [conv2d_2, x3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/sa/csaftzx2wizpjlh3kfhvehexsbodntymkhlieviiunpbabb3qdpw.py
# Topologically Sorted Source Nodes: [x3_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x3_1 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xm/cxmz3iknyzchppdbcm7wr6eaxxzpoahax5jalpj6fh3xvvzjnqzl.py
# Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x4 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/f7/cf7o7wnf37yxmpneyxpuue2wp32cye5lrxh45re7d5apq2tbhhro.py
# Topologically Sorted Source Nodes: [x4_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x4_1 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mj/cmjkcfeo4tubxif7tg6745nnhg3scou4xmvjwqrgzmtgyrg7rmiy.py
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x5_1 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_8 = async_compile.triton('triton_poi_fused__to_copy_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_8(out_ptr0, xnumel, XBLOCK : tl.constexpr):
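    # Lower integer source index for the 4 -> 8 bilinear upsample (align_corners=True):
    # floor(clamp(dst * 3/7, min=0)).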
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rs/crseilwit3k4wvqimdnrd6awbgjcfehgp62sszhpfv57pb4m4zrm.py
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x5_1 => add, clamp_max
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 3), kwargs = {})
triton_poi_fused_add_clamp_9 = async_compile.triton('triton_poi_fused_add_clamp_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_9(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
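
# The companion table above holds the bottom/right neighbour: the same source
# index advanced by one and clamped to in_size - 1 (here 3) so the gather never
# reads past the input edge. Sketch, reusing the hypothetical helper above:
def _bilinear_src_index_hi(dst, in_size, out_size):
    return min(_bilinear_src_index(dst, in_size, out_size) + 1, in_size - 1)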
# kernel path: runs/run_shard_8/inductor_cache/pi/cpiop5fcjvtv63ctxzn3d4rh4j75t34xslokppayb4zvewqp6l7i.py
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x5_1 => clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0.42857142857142855), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_10 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_10(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
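
# The third per-axis table is the interpolation weight: the fractional part of
# the source coordinate, clamped into [0, 1]. Sketch under the same
# assumptions as the helpers above:
def _bilinear_lerp_weight(dst, in_size, out_size):
    src = max(dst * (in_size - 1) / (out_size - 1), 0.0)
    return min(max(src - int(src), 0.0), 1.0)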
# kernel path: runs/run_shard_8/inductor_cache/qw/cqwrnokux24uvj3zc4nfkgobdpr5hu37oljptx3osogfzex6dcgv.py
# Topologically Sorted Source Nodes: [conv2d_4, x5, x5_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x5 => relu_4
# x5_1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, mul_2, mul_3, mul_4, sub_1, sub_2, sub_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_11 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 8) % 8  # output row in the 8x8 upsampled plane
    x0 = xindex % 8  # output column
    x6 = (xindex // 64)  # flattened (batch, channel) plane index
    x2 = (xindex // 64) % 256  # channel index, used to load the conv bias
    x4 = xindex  # linear output offset
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)  # wrap negative row indices (Python-style indexing for _unsafe_index)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)  # wrap negative column indices
tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + (4*tmp28) + (16*x6)), None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + (4*tmp28) + (16*x6)), None, eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + (x4), tmp41, None)
''', device_str='cuda')
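
# The fused kernel above adds the conv2d_4 bias, applies ReLU, then blends the
# four neighbouring activations with the precomputed weights. Per-output-element
# sketch (illustrative; `v` stands for the relu(conv2d_4 + bias) plane, and
# y0/y1/x0/x1/wx/wy are the index and weight tables built by the three kernels
# above -- this mirrors tmp24, tmp37 and tmp41):
def _bilinear_sample(v, y0, y1, x0, x1, wx, wy):
    top = v[y0][x0] + (v[y0][x1] - v[y0][x0]) * wx
    bottom = v[y1][x0] + (v[y1][x1] - v[y1][x0]) * wx
    return top + (bottom - top) * wy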
# kernel path: runs/run_shard_8/inductor_cache/pn/cpn6tjdxb6btztw3ol742yphqj6ezpbpcpqqg27uideruz7snd6b.py
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x6_1 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_12 = async_compile.triton('triton_poi_fused__to_copy_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
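
# This kernel and the ones through triton_poi_fused..._23 below repeat the
# index / +1-clamp / lerp-weight / gather pattern of the 8-pixel stage at 16,
# 32 and 64 pixels per side; only the (in_size - 1) / (out_size - 1) constant
# changes. The four constants used in this file can be checked directly:
# >>> [(n - 1) / (2 * n - 1) for n in (4, 8, 16, 32)]
# [0.42857142857142855, 0.4666666666666667, 0.4838709677419355, 0.49206349206349204]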
# kernel path: runs/run_shard_8/inductor_cache/uc/cucvwpcqoa76tdqscblojrtifxyubw7f75qcjojgzv4dsve6jrp6.py
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x6_1 => add_5, clamp_max_4
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_5, 7), kwargs = {})
triton_poi_fused_add_clamp_13 = async_compile.triton('triton_poi_fused_add_clamp_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 7, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uu/cuu4orlfbmhm5cvtvhxoioht2tj77ih5l5navsqwodi4tzxgl6v6.py
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x6_1 => clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_5, sub_5
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_4, 0.4666666666666667), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_5, 0.0), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_14 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/dk/cdkwc6le4ie4tnrsax4xfcztxgmzepd5emq5tvoqmelftctmorjb.py
# Topologically Sorted Source Nodes: [conv2d_5, x6, x6_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# x6 => relu_5
# x6_1 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_7, add_8, add_9, mul_7, mul_8, mul_9, sub_6, sub_7, sub_9
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_6), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_6), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %add_7), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_7), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_9), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_15 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x6 = (xindex // 256)
x2 = (xindex // 256) % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + (8*tmp28) + (64*x6)), None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + (8*tmp28) + (64*x6)), None, eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + (x4), tmp41, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/25/c25s4wujr3belrbdr2scgvcvc7klf5wyhbl5hlirnhpq723d5o4v.py
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x7_1 => convert_element_type_9
# Graph fragment:
# %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_16 = async_compile.triton('triton_poi_fused__to_copy_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_16(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/t2/ct2xckimv3iy6o6wbogbtcvg4557ggylf7k4yhrmdyptqvydpwdh.py
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x7_1 => add_10, clamp_max_8
# Graph fragment:
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
# %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_10, 15), kwargs = {})
triton_poi_fused_add_clamp_17 = async_compile.triton('triton_poi_fused_add_clamp_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_17(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 15, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/au/cauonbjwa2nbu2kb2chgjgk6x4zuwrobfqkwhuxbrfr4pfqaldrq.py
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x7_1 => clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_10, sub_10
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_8, 0.4838709677419355), kwargs = {})
# %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_10, 0.0), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_18 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/me/cmejwancphprl3gevbf3x7ue5gwrd57kmgnphshowzrl5mp3qhmu.py
# Topologically Sorted Source Nodes: [conv2d_6, x7, x7_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x7 => relu_6
# x7_1 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_12, add_13, add_14, mul_12, mul_13, mul_14, sub_11, sub_12, sub_14
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_9, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_10), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_12), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_10), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_13), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %clamp_max_11), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_14), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_19 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_19(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x6 = (xindex // 1024)
x2 = (xindex // 1024) % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + (16*tmp28) + (256*x6)), None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + (16*tmp28) + (256*x6)), None, eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + (x4), tmp41, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bp/cbpp2gzegqebodwq3dmsyjmgtjww47xdirocnam66urpn6k5idz4.py
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x8_1 => convert_element_type_13
# Graph fragment:
# %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_20 = async_compile.triton('triton_poi_fused__to_copy_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ru/cru5iptigztikjrjv73ns5grhdkp2pjxsf3slcmjffaa5tlja4ul.py
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x8_1 => add_15, clamp_max_12
# Graph fragment:
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {})
# %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 31), kwargs = {})
triton_poi_fused_add_clamp_21 = async_compile.triton('triton_poi_fused_add_clamp_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_21(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 31, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/sq/csq6p7g3kv47f3itruvtb5eo7u4zkoevlcdxwlooaikxeznepehw.py
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
# x8_1 => clamp_max_14, clamp_min_12, clamp_min_14, convert_element_type_12, iota_6, mul_15, sub_15
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_6, torch.float32), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_12, 0.49206349206349204), kwargs = {})
# %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_15, 0.0), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0.0), kwargs = {})
# %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_22 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_22(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/y2/cy2mkv323j3vxqwupculspj4fbth55uo64mgkwrdth2c4ae4rvqe.py
# Topologically Sorted Source Nodes: [conv2d_7, x8, x8_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x8 => relu_7
# x8_1 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_17, add_18, add_19, mul_17, mul_18, mul_19, sub_16, sub_17, sub_19
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_14, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max_14), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_17), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_14), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_18), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_18, %add_17), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %clamp_max_15), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %mul_19), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_23 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_23(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x6 = (xindex // 4096)
x2 = (xindex // 4096) % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + (32*tmp28) + (1024*x6)), None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + (32*tmp28) + (1024*x6)), None, eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + (x4), tmp41, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/n6/cn6xjevg27wpclqaheqxdrv7ortplyb7afgghyfczfly34inbwle.py
# Topologically Sorted Source Nodes: [x11, y], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# x11 => convolution_10
# y => sigmoid
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_sigmoid_24 = async_compile.triton('triton_poi_fused_convolution_sigmoid_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
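# Editor's note: the kernel above fuses the final 1x1 conv's bias add with the
# sigmoid activation in one elementwise pass, i.e. per element
# y = 1 / (1 + exp(-(x + bias[c]))), with c the channel index derived as x1.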
# kernel path: runs/run_shard_8/inductor_cache/wx/cwxe6la5l3yabcfhbrrnptrtohzikeu7kvv6k3ahehdtcatueznv.py
# Topologically Sorted Source Nodes: [conv2d_7, x8], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x8 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_14, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_25 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_25(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xx/cxxo2m7uxsctgw5l64od6d6bydhkdlbxllaxwmohfhultz6zhllx.py
# Topologically Sorted Source Nodes: [conv2d_6, x7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x7 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_9, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_26 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_26(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mj/cmjd5nfa35l7fiqhjzgx7u2xibmmcn4frgi6on5w4qpfoxkdrhcl.py
# Topologically Sorted Source Nodes: [conv2d_5, x6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# x6 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_27 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_27(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kb/ckb4w6f656zg5rxre7fokkz7llx34cknxh5rica2tvqq6undnhyg.py
# Topologically Sorted Source Nodes: [conv2d_4, x5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x5 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_28 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
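# Editor's note: kernels 25-28 above contribute nothing to the forward output;
# each recomputes relu(conv + bias) and stores the boolean mask (result <= 0)
# that aten.threshold_backward later uses to zero gradients where the ReLU was
# inactive. A one-element sketch of the predicate, with illustrative names:
def _relu_backward_mask_sketch(x, bias):
    return max(x + bias, 0.0) <= 0.0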
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (64, ), (1, ))
assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_21, (64, ), (1, ))
assert_size_stride(primals_22, (4, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_23, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 262144, grid=grid(262144), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x2_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 16, 16), (32768, 256, 16, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 131072, grid=grid(131072), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x3_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 128, 8, 8), (8192, 64, 8, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf13, primals_9, 32768, grid=grid(32768), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [x4_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf13, buf14, buf15, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 4, 4), (4096, 16, 4, 1))
buf17 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_8.run(buf17, 8, grid=grid(8), stream=stream0)
buf18 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_9.run(buf18, 8, grid=grid(8), stream=stream0)
buf19 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_8.run(buf19, 8, grid=grid(8), stream=stream0)
buf20 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_9.run(buf20, 8, grid=grid(8), stream=stream0)
buf21 = empty_strided_cuda((8, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_10.run(buf21, 8, grid=grid(8), stream=stream0)
buf23 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x5_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_10.run(buf23, 8, grid=grid(8), stream=stream0)
buf24 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x5, x5_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_11.run(buf25, buf17, buf19, buf16, primals_11, buf20, buf21, buf18, buf23, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_12.run(buf27, 16, grid=grid(16), stream=stream0)
buf28 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf28, 16, grid=grid(16), stream=stream0)
buf29 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_12.run(buf29, 16, grid=grid(16), stream=stream0)
buf30 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf30, 16, grid=grid(16), stream=stream0)
buf31 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_14.run(buf31, 16, grid=grid(16), stream=stream0)
buf33 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x6_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_14.run(buf33, 16, grid=grid(16), stream=stream0)
buf34 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32)
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x6, x6_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_15.run(buf35, buf27, buf29, buf26, primals_13, buf30, buf31, buf28, buf33, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 128, 16, 16), (32768, 256, 16, 1))
buf37 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_16.run(buf37, 32, grid=grid(32), stream=stream0)
buf38 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_17.run(buf38, 32, grid=grid(32), stream=stream0)
buf39 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_16.run(buf39, 32, grid=grid(32), stream=stream0)
buf40 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_17.run(buf40, 32, grid=grid(32), stream=stream0)
buf41 = empty_strided_cuda((32, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_18.run(buf41, 32, grid=grid(32), stream=stream0)
buf43 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x7_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_18.run(buf43, 32, grid=grid(32), stream=stream0)
buf44 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
buf45 = buf44; del buf44 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, x7, x7_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_19.run(buf45, buf37, buf39, buf36, primals_15, buf40, buf41, buf38, buf43, 524288, grid=grid(524288), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf47 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_20.run(buf47, 64, grid=grid(64), stream=stream0)
buf48 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_21.run(buf48, 64, grid=grid(64), stream=stream0)
buf49 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
triton_poi_fused__to_copy_20.run(buf49, 64, grid=grid(64), stream=stream0)
buf50 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_21.run(buf50, 64, grid=grid(64), stream=stream0)
buf51 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
triton_poi_fused__to_copy_arange_clamp_mul_sub_22.run(buf51, 64, grid=grid(64), stream=stream0)
buf53 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x8_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_arange_clamp_mul_sub_22.run(buf53, 64, grid=grid(64), stream=stream0)
buf54 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
buf55 = buf54; del buf54 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x8, x8_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_23.run(buf55, buf47, buf49, buf46, primals_17, buf50, buf51, buf48, buf53, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [conv2d_8, x9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf57, primals_19, 1048576, grid=grid(1048576), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf58 = extern_kernels.convolution(buf57, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf59 = buf58; del buf58 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf59, primals_21, 1048576, grid=grid(1048576), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [x11], Original ATen: [aten.convolution]
buf60 = extern_kernels.convolution(buf59, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf61 = buf60; del buf60 # reuse
# Topologically Sorted Source Nodes: [x11, y], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_24.run(buf61, primals_23, 65536, grid=grid(65536), stream=stream0)
del primals_23
buf62 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_7, x8], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_25.run(buf46, primals_17, buf62, 524288, grid=grid(524288), stream=stream0)
del buf46
del primals_17
buf63 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_6, x7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_26.run(buf36, primals_15, buf63, 131072, grid=grid(131072), stream=stream0)
del buf36
del primals_15
buf64 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_5, x6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_27.run(buf26, primals_13, buf64, 65536, grid=grid(65536), stream=stream0)
del buf26
del primals_13
buf65 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, x5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_28.run(buf16, primals_11, buf65, 16384, grid=grid(16384), stream=stream0)
del buf16
del primals_11
return (buf61, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19, buf20, buf21, buf23, buf25, buf27, buf28, buf29, buf30, buf31, buf33, buf35, buf37, buf38, buf39, buf40, buf41, buf43, buf45, buf47, buf48, buf49, buf50, buf51, buf53, buf55, buf57, buf59, buf61, buf62, buf63, buf64, buf65, )
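# Editor's note: the first returned tensor (buf61) is the forward output y;
# the remaining weights, activations, pooling argmax codes, and interpolation
# tables are handed back so the autograd backward graph can reuse them.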
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
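# Editor's note: running this file directly benchmarks the compiled graph;
# compiled_module_main times `call` on rand_strided inputs matching the
# asserted shapes and strides above.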
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net_basic(nn.Module):
"""基础网络,仅包含保存、加载模型的功能"""
def __init__(self):
super(Net_basic, self).__init__()
def load(self, path):
"""加载指定模型"""
self.load_state_dict(torch.load(path))
def save(self, path):
"""保存模型"""
torch.save(self.state_dict(), path)
class Net(Net_basic):
def __init__(self, inp, out, pooling=[1, 1, 1, 1]):
super(Net, self).__init__()
self.pooling = pooling
self.conv1 = nn.Conv2d(inp, 64, 3, padding=1)
self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
self.conv5 = nn.Conv2d(128, 256, 3, padding=1)
self.conv6 = nn.Conv2d(256, 256, 3, padding=1)
self.conv7 = nn.Conv2d(256, 128, 3, padding=1)
self.conv8 = nn.Conv2d(128, 128, 3, padding=1)
self.conv9 = nn.Conv2d(128, 64, 3, padding=1)
self.conv10 = nn.Conv2d(64, 64, 3, padding=1)
self.conv11 = nn.Conv2d(64, out, 1, padding=0)
self.up = nn.Upsample(scale_factor=2, mode='bilinear',
align_corners=True)
def forward(self, x):
x1 = F.relu(self.conv1(x))
if self.pooling[0] == 1:
x1 = F.max_pool2d(x1, 2)
x2 = F.relu(self.conv2(x1))
if self.pooling[1] == 1:
x2 = F.max_pool2d(x2, 2)
x3 = F.relu(self.conv3(x2))
if self.pooling[2] == 1:
x3 = F.max_pool2d(x3, 2)
x4 = F.relu(self.conv4(x3))
if self.pooling[3] == 1:
x4 = F.max_pool2d(x4, 2)
x5 = F.relu(self.conv5(x4))
if self.pooling[3] == 1:
x5 = self.up(x5)
x6 = F.relu(self.conv6(x5))
if self.pooling[2] == 1:
x6 = self.up(x6)
x7 = F.relu(self.conv7(x6))
if self.pooling[1] == 1:
x7 = self.up(x7)
x8 = F.relu(self.conv8(x7))
if self.pooling[0] == 1:
x8 = self.up(x8)
x9 = F.relu(self.conv9(x8))
x10 = F.relu(self.conv10(x9))
x11 = self.conv11(x10)
y = torch.sigmoid(x11)
return y
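# Editor's note: the forward pass is a small U-Net-style encoder/decoder
# without skip connections: four conv+pool stages shrink 64x64 down to 4x4,
# four conv+upsample stages grow it back, and two refinement convs feed a 1x1
# conv with sigmoid. Each `pooling` flag gates a downsample and its matching
# upsample, so the output keeps the input's spatial size.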
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'inp': 4, 'out': 4}]
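# Editor's usage sketch (not part of the original file): wiring the helpers
# above together for one eager-mode forward pass.
def _demo_forward():
    init_args, init_kwargs = get_init_inputs()
    net = Net(*init_args, **init_kwargs)  # Net(inp=4, out=4)
    y = net(*get_inputs())                # sigmoid output of shape (4, 4, 64, 64)
    return y.shape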
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
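# Editor's note: each max_pool2d kernel above reduces a 2x2 window to its max
# and also emits an int8 argmax code (0=top-left, 1=top-right, 2=bottom-left,
# 3=bottom-right) that max_pool2d_with_indices saves for the backward pass.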
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused__to_copy_8(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_9(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_10(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
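# Editor's note: kernels 8-10 precompute the align_corners=True bilinear grid
# for the 4 -> 8 upsample: src = dst * (in - 1) / (out - 1) = dst * 3/7, i.e.
# the constant 0.42857142857142855 above. Kernel 8 stores floor(src), kernel 9
# stores min(floor(src) + 1, in - 1), and kernel 10 stores the fractional
# blend weight clamp(src - floor(src), 0, 1). The 16-, 32-, and 64-wide
# variants further down only swap in 7/15, 15/31, and 31/63 as the scale.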
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_11(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8 % 8
x0 = xindex % 8
x6 = xindex // 64
x2 = xindex // 64 % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + 4 * tmp28 + 16 * x6), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + 4 * tmp28 + 16 * x6), None,
eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + x4, tmp41, None)
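# Editor's note: the `tmp < 0 ? tmp + size : tmp` pattern at the top of the
# gather kernels implements aten._unsafe_index's Python-style negative-index
# wraparound; since kernels 8-9 only emit non-negative indices, the tl.where
# here is effectively a no-op kept for generality.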
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 7, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_14(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4666666666666667
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_15(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x6 = xindex // 256
x2 = xindex // 256 % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + 8 * tmp28 + 64 * x6), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + 8 * tmp28 + 64 * x6), None,
eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + x4, tmp41, None)
@triton.jit
def triton_poi_fused__to_copy_16(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_17(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 15, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_18(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.4838709677419355
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_19(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x6 = xindex // 1024
x2 = xindex // 1024 % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + 16 * tmp28 + 256 * x6), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + 16 * tmp28 + 256 * x6), None,
eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + x4, tmp41, None)
@triton.jit
def triton_poi_fused__to_copy_20(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_clamp_21(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 31, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_22(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.49206349206349204
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = triton_helpers.maximum(tmp8, tmp4)
tmp10 = 1.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_23(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x6 = xindex // 4096
x2 = xindex // 4096 % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp17 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp19 = tmp18 + tmp10
tmp20 = triton_helpers.maximum(tmp12, tmp19)
tmp21 = tmp20 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp8 + 32 * tmp28 + 1024 * x6), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = triton_helpers.maximum(tmp12, tmp30)
tmp32 = tl.load(in_ptr2 + (tmp17 + 32 * tmp28 + 1024 * x6), None,
eviction_policy='evict_last')
tmp33 = tmp32 + tmp10
tmp34 = triton_helpers.maximum(tmp12, tmp33)
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp22
tmp37 = tmp31 + tmp36
tmp38 = tmp37 - tmp24
tmp40 = tmp38 * tmp39
tmp41 = tmp24 + tmp40
tl.store(in_out_ptr0 + x4, tmp41, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_24(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_25(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_26(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_27(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_28(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
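# Editor's note: call() is the whole compiled forward pass of the
# encoder-decoder model below: a conv+ReLU encoder with four 2x2 max-pools
# (64x64 down to 4x4), a decoder of four bilinear 2x upsamples each followed
# by a 3x3 convolution, and a final 1x1 convolution + sigmoid. The first
# returned tensor is the network output; the rest are saved for backward.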
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_21, (64,), (1,))
assert_size_stride(primals_22, (4, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_23, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2,
buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(262144)](buf5, primals_5,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(65536)](buf5, buf6,
buf7, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 16, 16), (32768, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(131072)](buf9, primals_7,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
buf11 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.int8
)
triton_poi_fused_max_pool2d_with_indices_5[grid(32768)](buf9, buf10,
buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 128, 8, 8), (8192, 64, 8, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_6[grid(32768)](buf13, primals_9,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
buf15 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.int8
)
triton_poi_fused_max_pool2d_with_indices_7[grid(8192)](buf13, buf14,
buf15, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 4, 4), (4096, 16, 4, 1))
buf17 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_8[grid(8)](buf17, 8, XBLOCK=8, num_warps=
1, num_stages=1)
buf18 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_9[grid(8)](buf18, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf19 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_8[grid(8)](buf19, 8, XBLOCK=8, num_warps=
1, num_stages=1)
buf20 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused_add_clamp_9[grid(8)](buf20, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf21 = empty_strided_cuda((8,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_10[grid(8)](buf21, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_10[grid(8)](buf23, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
buf25 = buf24
del buf24
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_11[grid
(65536)](buf25, buf17, buf19, buf16, primals_11, buf20, buf21,
buf18, buf23, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf26 = extern_kernels.convolution(buf25, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_12[grid(16)](buf27, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_13[grid(16)](buf28, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_12[grid(16)](buf29, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused_add_clamp_13[grid(16)](buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_14[grid(16)](buf31,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf33 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_14[grid(16)](buf33,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.float32)
buf35 = buf34
del buf34
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_15[grid
(262144)](buf35, buf27, buf29, buf26, primals_13, buf30, buf31,
buf28, buf33, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf36 = extern_kernels.convolution(buf35, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 128, 16, 16), (32768, 256, 16, 1))
buf37 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_16[grid(32)](buf37, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_17[grid(32)](buf38, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf39 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_16[grid(32)](buf39, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf40 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_add_clamp_17[grid(32)](buf40, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf41 = empty_strided_cuda((32,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_18[grid(32)](buf41,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf43 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_18[grid(32)](buf43,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf44 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
buf45 = buf44
del buf44
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_19[grid
(524288)](buf45, buf37, buf39, buf36, primals_15, buf40, buf41,
buf38, buf43, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf46 = extern_kernels.convolution(buf45, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf47 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_20[grid(64)](buf47, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf48 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_21[grid(64)](buf48, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf49 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_20[grid(64)](buf49, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf50 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_21[grid(64)](buf50, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf51 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_22[grid(64)](buf51,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf53 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_arange_clamp_mul_sub_22[grid(64)](buf53,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf54 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf55 = buf54
del buf54
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_23[grid
(2097152)](buf55, buf47, buf49, buf46, primals_17, buf50, buf51,
buf48, buf53, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
buf56 = extern_kernels.convolution(buf55, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_0[grid(1048576)](buf57,
primals_19, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf58 = extern_kernels.convolution(buf57, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf59 = buf58
del buf58
triton_poi_fused_convolution_relu_0[grid(1048576)](buf59,
primals_21, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf60 = extern_kernels.convolution(buf59, primals_22, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf61 = buf60
del buf60
triton_poi_fused_convolution_sigmoid_24[grid(65536)](buf61,
primals_23, 65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf62 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_25[grid(524288)](
buf46, primals_17, buf62, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf46
del primals_17
buf63 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_26[grid(131072)](
buf36, primals_15, buf63, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf36
del primals_15
buf64 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_27[grid(65536)](
buf26, primals_13, buf64, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del buf26
del primals_13
buf65 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_28[grid(16384)](
buf16, primals_11, buf65, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf16
del primals_11
return (buf61, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, buf1, buf2, buf3, buf5, buf6, buf7, buf9,
buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19, buf20,
buf21, buf23, buf25, buf27, buf28, buf29, buf30, buf31, buf33,
buf35, buf37, buf38, buf39, buf40, buf41, buf43, buf45, buf47,
buf48, buf49, buf50, buf51, buf53, buf55, buf57, buf59, buf61,
buf62, buf63, buf64, buf65)
class Net_basic(nn.Module):
"""基础网络,仅包含保存、加载模型的功能"""
def __init__(self):
super(Net_basic, self).__init__()
def load(self, path):
"""加载指定模型"""
self.load_state_dict(torch.load(path))
def save(self, path):
"""保存模型"""
torch.save(self.state_dict(), path)
class NetNew(Net_basic):
def __init__(self, inp, out, pooling=[1, 1, 1, 1]):
super(NetNew, self).__init__()
self.pooling = pooling
self.conv1 = nn.Conv2d(inp, 64, 3, padding=1)
self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
self.conv5 = nn.Conv2d(128, 256, 3, padding=1)
self.conv6 = nn.Conv2d(256, 256, 3, padding=1)
self.conv7 = nn.Conv2d(256, 128, 3, padding=1)
self.conv8 = nn.Conv2d(128, 128, 3, padding=1)
self.conv9 = nn.Conv2d(128, 64, 3, padding=1)
self.conv10 = nn.Conv2d(64, 64, 3, padding=1)
self.conv11 = nn.Conv2d(64, out, 1, padding=0)
self.up = nn.Upsample(scale_factor=2, mode='bilinear',
align_corners=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.conv6.weight
primals_13 = self.conv6.bias
primals_14 = self.conv7.weight
primals_15 = self.conv7.bias
primals_16 = self.conv8.weight
primals_17 = self.conv8.bias
primals_18 = self.conv9.weight
primals_19 = self.conv9.bias
primals_20 = self.conv10.weight
primals_21 = self.conv10.bias
primals_22 = self.conv11.weight
primals_23 = self.conv11.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0]
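# Minimal smoke test (editor's sketch, not part of the original source;
# assumes a CUDA device, since call() pins every buffer to cuda:0).
if __name__ == '__main__':
    net = NetNew(inp=4, out=4).cuda()
    x = torch.rand(4, 4, 64, 64, device='cuda')
    y = net(x)
    assert y.shape == (4, 4, 64, 64)  # sigmoid output, values in (0, 1)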
|
IewNixIl/graduation_project_under
|
Net
| false | 9,278 |
[
"MIT"
] | 0 |
67d0345208511bb06c35c3453227b2fa4ebef4a3
|
https://github.com/IewNixIl/graduation_project_under/tree/67d0345208511bb06c35c3453227b2fa4ebef4a3
|
SqueezeExcite
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x_se], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x_se => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/o5/co5kpgkyaabh4nd7yz4gzpyl7x35mwdhgusbruykvtydzlq2lizg.py
# Topologically Sorted Source Nodes: [x_se_1, x_se_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_se_1 => convolution
# x_se_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hc/chcuw27vv75itefu6xswbdhopf2pq6oaj4bww4whelczhqtkghqr.py
# Topologically Sorted Source Nodes: [x_se_3, add, relu6, truediv, x], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# relu6 => clamp_max, clamp_min
# truediv => div
# x => mul
# x_se_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_add_convolution_div_hardtanh_mul_2 = async_compile.triton('triton_poi_fused_add_convolution_div_hardtanh_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_hardtanh_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rh/crhdgkbcu2rbqzaxws3536qfrpjhmp6d6zhyebzqejxvceodywdi.py
# Topologically Sorted Source Nodes: [x_se_3, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
# Source node to ATen node mapping:
# add => add
# x_se_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3.0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add, 6), kwargs = {})
# %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_add_convolution_hardtanh_backward_3 = async_compile.triton('triton_poi_fused_add_convolution_hardtanh_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_hardtanh_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_se], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_se_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_se_1, x_se_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_se_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_se_3, add, relu6, truediv, x], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
triton_poi_fused_add_convolution_div_hardtanh_mul_2.run(primals_1, buf4, primals_5, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_se_3, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
triton_poi_fused_add_convolution_hardtanh_backward_3.run(buf4, primals_5, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del primals_5
return (buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.utils.data.distributed
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
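# Worked example (editor's note): for this SqueezeExcite with in_chs=4,
# se_ratio=0.25 and divisor=4, v = 1.0, so
# new_v = max(4, int(1.0 + 2) // 4 * 4) = max(4, 0) = 4, and since
# 4 >= 0.9 * 1.0 the function returns 4: conv_reduce is a 4 -> 4 1x1 conv.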
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
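# Quick check (editor's note): hard_sigmoid(torch.tensor(0.0)) == 0.5, and the
# gate saturates at 0 for x <= -3 and at 1 for x >= 3.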
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chs': 4}]
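# Eager-mode usage sketch (editor's addition), wiring the two helpers above
# together the way a test harness typically would:
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    se = SqueezeExcite(*init_args, **init_kwargs)
    out = se(*get_inputs())
    assert out.shape == (4, 4, 4, 4)  # the SE gate preserves the input shape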
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)](
primals_1, buf4, primals_5, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class SqueezeExciteNew(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExciteNew, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, input_0):
primals_2 = self.conv_reduce.weight
primals_3 = self.conv_reduce.bias
primals_4 = self.conv_expand.weight
primals_5 = self.conv_expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
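# Smoke test (editor's sketch, not in the original source; requires CUDA
# because call() allocates every buffer on cuda:0).
if __name__ == '__main__':
    se = SqueezeExciteNew(in_chs=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = se(x)
    assert y.shape == x.shape  # the gate only rescales channels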
|
Erfun76/insightface
|
SqueezeExcite
| false | 9,279 |
[
"MIT"
] | 0 |
148cef36a43a055f68d2b6a475f4aa38625ad8b4
|
https://github.com/Erfun76/insightface/tree/148cef36a43a055f68d2b6a475f4aa38625ad8b4
|
ConvRelu
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/qf/cqf7gwutkswogpoieukkzrlezczaf4jzo3cnrl7zupsezutoj3ez.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.backends.cudnn
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class ConvRelu(nn.Module):
def __init__(self, in_: 'int', out: 'int'):
super(ConvRelu, self).__init__()
self.conv = conv3x3(in_, out)
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_': 4, 'out': 4}]
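# Usage sketch (editor's addition): a 3x3 convolution with padding=1 keeps the
# spatial size, so ConvRelu maps (N, in_, H, W) to (N, out, H, W).
if __name__ == '__main__':
    block = ConvRelu(in_=4, out=4)
    y = block(*get_inputs())
    assert y.shape == (4, 4, 4, 4) and (y >= 0).all()  # ReLU output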
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class ConvReluNew(nn.Module):
def __init__(self, in_: 'int', out: 'int'):
super(ConvReluNew, self).__init__()
self.conv = conv3x3(in_, out)
self.activation = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
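# Smoke test (editor's sketch, not part of the original source; requires CUDA
# because call() launches the fused Triton kernel on cuda:0).
if __name__ == '__main__':
    block = ConvReluNew(in_=4, out=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = block(x)
    assert y.shape == (4, 4, 4, 4) and (y >= 0).all()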
|
ImmortalTurtle/robot-surgery-segmentation
|
ConvRelu
| false | 9,280 |
[
"MIT"
] | 0 |
dd86cec33d800c1104e9f89296ef8b1d38e968e2
|
https://github.com/ImmortalTurtle/robot-surgery-segmentation/tree/dd86cec33d800c1104e9f89296ef8b1d38e968e2
|